xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/config/arm/arm_mve.h (revision 4c3eb207d36f67d31994830c0a694161fc1ca39b)
1 /* Arm MVE intrinsics include file.
2 
3    Copyright (C) 2019-2020 Free Software Foundation, Inc.
4    Contributed by Arm.
5 
6    This file is part of GCC.
7 
8    GCC is free software; you can redistribute it and/or modify it
9    under the terms of the GNU General Public License as published
10    by the Free Software Foundation; either version 3, or (at your
11    option) any later version.
12 
13    GCC is distributed in the hope that it will be useful, but WITHOUT
14    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
16    License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with GCC; see the file COPYING3.  If not see
20    <http://www.gnu.org/licenses/>.  */
21 
22 #ifndef _GCC_ARM_MVE_H
23 #define _GCC_ARM_MVE_H
24 
25 #if __ARM_BIG_ENDIAN
26 #error "MVE intrinsics are not supported in Big-Endian mode."
27 #elif !__ARM_FEATURE_MVE
28 #error "MVE feature not supported"
29 #else
30 
31 #include <stdint.h>
32 #ifndef  __cplusplus
33 #include <stdbool.h>
34 #endif
35 #include "arm_mve_types.h"
36 
37 #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE
38 #define vst4q(__addr, __value) __arm_vst4q(__addr, __value)
39 #define vdupq_n(__a) __arm_vdupq_n(__a)
40 #define vabsq(__a) __arm_vabsq(__a)
41 #define vclsq(__a) __arm_vclsq(__a)
42 #define vclzq(__a) __arm_vclzq(__a)
43 #define vnegq(__a) __arm_vnegq(__a)
44 #define vaddlvq(__a) __arm_vaddlvq(__a)
45 #define vaddvq(__a) __arm_vaddvq(__a)
46 #define vmovlbq(__a) __arm_vmovlbq(__a)
47 #define vmovltq(__a) __arm_vmovltq(__a)
48 #define vmvnq(__a) __arm_vmvnq(__a)
49 #define vrev16q(__a) __arm_vrev16q(__a)
50 #define vrev32q(__a) __arm_vrev32q(__a)
51 #define vrev64q(__a) __arm_vrev64q(__a)
52 #define vqabsq(__a) __arm_vqabsq(__a)
53 #define vqnegq(__a) __arm_vqnegq(__a)
54 #define vshrq(__a, __imm) __arm_vshrq(__a, __imm)
55 #define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p)
56 #define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b)
57 #define vshlq(__a, __b) __arm_vshlq(__a, __b)
58 #define vsubq(__a, __b) __arm_vsubq(__a, __b)
59 #define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b)
60 #define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b)
61 #define vqsubq(__a, __b) __arm_vqsubq(__a, __b)
62 #define vqaddq(__a, __b) __arm_vqaddq(__a, __b)
63 #define vorrq(__a, __b) __arm_vorrq(__a, __b)
64 #define vornq(__a, __b) __arm_vornq(__a, __b)
65 #define vmulq(__a, __b) __arm_vmulq(__a, __b)
66 #define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b)
67 #define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b)
68 #define vmulhq(__a, __b) __arm_vmulhq(__a, __b)
69 #define vmladavq(__a, __b) __arm_vmladavq(__a, __b)
70 #define vminvq(__a, __b) __arm_vminvq(__a, __b)
71 #define vminq(__a, __b) __arm_vminq(__a, __b)
72 #define vmaxvq(__a, __b) __arm_vmaxvq(__a, __b)
73 #define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
74 #define vhsubq(__a, __b) __arm_vhsubq(__a, __b)
75 #define vhaddq(__a, __b) __arm_vhaddq(__a, __b)
76 #define veorq(__a, __b) __arm_veorq(__a, __b)
77 #define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b)
78 #define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b)
79 #define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b)
80 #define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b)
81 #define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b)
82 #define vbicq(__a, __b) __arm_vbicq(__a, __b)
83 #define vandq(__a, __b) __arm_vandq(__a, __b)
84 #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
85 #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
86 #define vaddq(__a, __b) __arm_vaddq(__a, __b)
87 #define vabdq(__a, __b) __arm_vabdq(__a, __b)
88 #define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
89 #define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
90 #define vqshlq(__a, __b) __arm_vqshlq(__a, __b)
91 #define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b)
92 #define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b)
93 #define vminavq(__a, __b) __arm_vminavq(__a, __b)
94 #define vminaq(__a, __b) __arm_vminaq(__a, __b)
95 #define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b)
96 #define vmaxaq(__a, __b) __arm_vmaxaq(__a, __b)
97 #define vbrsrq(__a, __b) __arm_vbrsrq(__a, __b)
98 #define vshlq_n(__a, __imm) __arm_vshlq_n(__a, __imm)
99 #define vrshrq(__a, __imm) __arm_vrshrq(__a, __imm)
100 #define vqshlq_n(__a, __imm) __arm_vqshlq_n(__a, __imm)
101 #define vcmpltq(__a, __b) __arm_vcmpltq(__a, __b)
102 #define vcmpleq(__a, __b) __arm_vcmpleq(__a, __b)
103 #define vcmpgtq(__a, __b) __arm_vcmpgtq(__a, __b)
104 #define vcmpgeq(__a, __b) __arm_vcmpgeq(__a, __b)
105 #define vqshluq(__a, __imm) __arm_vqshluq(__a, __imm)
106 #define vqrdmulhq(__a, __b) __arm_vqrdmulhq(__a, __b)
107 #define vqdmulhq(__a, __b) __arm_vqdmulhq(__a, __b)
108 #define vmlsdavxq(__a, __b) __arm_vmlsdavxq(__a, __b)
109 #define vmlsdavq(__a, __b) __arm_vmlsdavq(__a, __b)
110 #define vmladavxq(__a, __b) __arm_vmladavxq(__a, __b)
111 #define vhcaddq_rot90(__a, __b) __arm_vhcaddq_rot90(__a, __b)
112 #define vhcaddq_rot270(__a, __b) __arm_vhcaddq_rot270(__a, __b)
113 #define vqmovntq(__a, __b) __arm_vqmovntq(__a, __b)
114 #define vqmovnbq(__a, __b) __arm_vqmovnbq(__a, __b)
115 #define vmulltq_poly(__a, __b) __arm_vmulltq_poly(__a, __b)
116 #define vmullbq_poly(__a, __b) __arm_vmullbq_poly(__a, __b)
117 #define vmovntq(__a, __b) __arm_vmovntq(__a, __b)
118 #define vmovnbq(__a, __b) __arm_vmovnbq(__a, __b)
119 #define vmlaldavq(__a, __b) __arm_vmlaldavq(__a, __b)
120 #define vqmovuntq(__a, __b) __arm_vqmovuntq(__a, __b)
121 #define vqmovunbq(__a, __b) __arm_vqmovunbq(__a, __b)
122 #define vshlltq(__a, __imm) __arm_vshlltq(__a, __imm)
123 #define vshllbq(__a, __imm) __arm_vshllbq(__a, __imm)
124 #define vqdmulltq(__a, __b) __arm_vqdmulltq(__a, __b)
125 #define vqdmullbq(__a, __b) __arm_vqdmullbq(__a, __b)
126 #define vmlsldavxq(__a, __b) __arm_vmlsldavxq(__a, __b)
127 #define vmlsldavq(__a, __b) __arm_vmlsldavq(__a, __b)
128 #define vmlaldavxq(__a, __b) __arm_vmlaldavxq(__a, __b)
129 #define vrmlaldavhq(__a, __b) __arm_vrmlaldavhq(__a, __b)
130 #define vaddlvaq(__a, __b) __arm_vaddlvaq(__a, __b)
131 #define vrmlsldavhxq(__a, __b) __arm_vrmlsldavhxq(__a, __b)
132 #define vrmlsldavhq(__a, __b) __arm_vrmlsldavhq(__a, __b)
133 #define vrmlaldavhxq(__a, __b) __arm_vrmlaldavhxq(__a, __b)
134 #define vabavq(__a, __b, __c) __arm_vabavq(__a, __b, __c)
135 #define vbicq_m_n(__a, __imm, __p) __arm_vbicq_m_n(__a, __imm, __p)
136 #define vqrshrnbq(__a, __b, __imm) __arm_vqrshrnbq(__a, __b, __imm)
137 #define vqrshrunbq(__a, __b, __imm) __arm_vqrshrunbq(__a, __b, __imm)
138 #define vrmlaldavhaq(__a, __b, __c) __arm_vrmlaldavhaq(__a, __b, __c)
139 #define vshlcq(__a, __b, __imm) __arm_vshlcq(__a, __b, __imm)
140 #define vpselq(__a, __b, __p) __arm_vpselq(__a, __b, __p)
141 #define vrev64q_m(__inactive, __a, __p) __arm_vrev64q_m(__inactive, __a, __p)
142 #define vqrdmlashq(__a, __b, __c) __arm_vqrdmlashq(__a, __b, __c)
143 #define vqrdmlahq(__a, __b, __c) __arm_vqrdmlahq(__a, __b, __c)
144 #define vqdmlashq(__a, __b, __c) __arm_vqdmlashq(__a, __b, __c)
145 #define vqdmlahq(__a, __b, __c) __arm_vqdmlahq(__a, __b, __c)
146 #define vmvnq_m(__inactive, __a, __p) __arm_vmvnq_m(__inactive, __a, __p)
147 #define vmlasq(__a, __b, __c) __arm_vmlasq(__a, __b, __c)
148 #define vmlaq(__a, __b, __c) __arm_vmlaq(__a, __b, __c)
149 #define vmladavq_p(__a, __b, __p) __arm_vmladavq_p(__a, __b, __p)
150 #define vmladavaq(__a, __b, __c) __arm_vmladavaq(__a, __b, __c)
151 #define vminvq_p(__a, __b, __p) __arm_vminvq_p(__a, __b, __p)
152 #define vmaxvq_p(__a, __b, __p) __arm_vmaxvq_p(__a, __b, __p)
153 #define vdupq_m(__inactive, __a, __p) __arm_vdupq_m(__inactive, __a, __p)
154 #define vcmpneq_m(__a, __b, __p) __arm_vcmpneq_m(__a, __b, __p)
155 #define vcmphiq_m(__a, __b, __p) __arm_vcmphiq_m(__a, __b, __p)
156 #define vcmpeqq_m(__a, __b, __p) __arm_vcmpeqq_m(__a, __b, __p)
157 #define vcmpcsq_m(__a, __b, __p) __arm_vcmpcsq_m(__a, __b, __p)
158 #define vcmpcsq_m_n(__a, __b, __p) __arm_vcmpcsq_m_n(__a, __b, __p)
159 #define vclzq_m(__inactive, __a, __p) __arm_vclzq_m(__inactive, __a, __p)
160 #define vaddvaq_p(__a, __b, __p) __arm_vaddvaq_p(__a, __b, __p)
161 #define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm)
162 #define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm)
163 #define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p)
164 #define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p)
165 #define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p)
166 #define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p)
167 #define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p)
168 #define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p)
169 #define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p)
170 #define vmaxaq_m(__a, __b, __p) __arm_vmaxaq_m(__a, __b, __p)
171 #define vcmpltq_m(__a, __b, __p) __arm_vcmpltq_m(__a, __b, __p)
172 #define vcmpleq_m(__a, __b, __p) __arm_vcmpleq_m(__a, __b, __p)
173 #define vcmpgtq_m(__a, __b, __p) __arm_vcmpgtq_m(__a, __b, __p)
174 #define vcmpgeq_m(__a, __b, __p) __arm_vcmpgeq_m(__a, __b, __p)
175 #define vqnegq_m(__inactive, __a, __p) __arm_vqnegq_m(__inactive, __a, __p)
176 #define vqabsq_m(__inactive, __a, __p) __arm_vqabsq_m(__inactive, __a, __p)
177 #define vnegq_m(__inactive, __a, __p) __arm_vnegq_m(__inactive, __a, __p)
178 #define vmlsdavxq_p(__a, __b, __p) __arm_vmlsdavxq_p(__a, __b, __p)
179 #define vmlsdavq_p(__a, __b, __p) __arm_vmlsdavq_p(__a, __b, __p)
180 #define vmladavxq_p(__a, __b, __p) __arm_vmladavxq_p(__a, __b, __p)
181 #define vclsq_m(__inactive, __a, __p) __arm_vclsq_m(__inactive, __a, __p)
182 #define vabsq_m(__inactive, __a, __p) __arm_vabsq_m(__inactive, __a, __p)
183 #define vqrdmlsdhxq(__inactive, __a, __b) __arm_vqrdmlsdhxq(__inactive, __a, __b)
184 #define vqrdmlsdhq(__inactive, __a, __b) __arm_vqrdmlsdhq(__inactive, __a, __b)
185 #define vqrdmladhxq(__inactive, __a, __b) __arm_vqrdmladhxq(__inactive, __a, __b)
186 #define vqrdmladhq(__inactive, __a, __b) __arm_vqrdmladhq(__inactive, __a, __b)
187 #define vqdmlsdhxq(__inactive, __a, __b) __arm_vqdmlsdhxq(__inactive, __a, __b)
188 #define vqdmlsdhq(__inactive, __a, __b) __arm_vqdmlsdhq(__inactive, __a, __b)
189 #define vqdmladhxq(__inactive, __a, __b) __arm_vqdmladhxq(__inactive, __a, __b)
190 #define vqdmladhq(__inactive, __a, __b) __arm_vqdmladhq(__inactive, __a, __b)
191 #define vmlsdavaxq(__a, __b, __c) __arm_vmlsdavaxq(__a, __b, __c)
192 #define vmlsdavaq(__a, __b, __c) __arm_vmlsdavaq(__a, __b, __c)
193 #define vmladavaxq(__a, __b, __c) __arm_vmladavaxq(__a, __b, __c)
194 #define vrmlaldavhaxq(__a, __b, __c) __arm_vrmlaldavhaxq(__a, __b, __c)
195 #define vrmlsldavhaq(__a, __b, __c) __arm_vrmlsldavhaq(__a, __b, __c)
196 #define vrmlsldavhaxq(__a, __b, __c) __arm_vrmlsldavhaxq(__a, __b, __c)
197 #define vaddlvaq_p(__a, __b, __p) __arm_vaddlvaq_p(__a, __b, __p)
198 #define vrev16q_m(__inactive, __a, __p) __arm_vrev16q_m(__inactive, __a, __p)
199 #define vrmlaldavhq_p(__a, __b, __p) __arm_vrmlaldavhq_p(__a, __b, __p)
200 #define vrmlaldavhxq_p(__a, __b, __p) __arm_vrmlaldavhxq_p(__a, __b, __p)
201 #define vrmlsldavhq_p(__a, __b, __p) __arm_vrmlsldavhq_p(__a, __b, __p)
202 #define vrmlsldavhxq_p(__a, __b, __p) __arm_vrmlsldavhxq_p(__a, __b, __p)
203 #define vorrq_m_n(__a, __imm, __p) __arm_vorrq_m_n(__a, __imm, __p)
204 #define vqrshrntq(__a, __b, __imm) __arm_vqrshrntq(__a, __b, __imm)
205 #define vqshrnbq(__a, __b, __imm) __arm_vqshrnbq(__a, __b, __imm)
206 #define vqshrntq(__a, __b, __imm) __arm_vqshrntq(__a, __b, __imm)
207 #define vrshrnbq(__a, __b, __imm) __arm_vrshrnbq(__a, __b, __imm)
208 #define vrshrntq(__a, __b, __imm) __arm_vrshrntq(__a, __b, __imm)
209 #define vshrnbq(__a, __b, __imm) __arm_vshrnbq(__a, __b, __imm)
210 #define vshrntq(__a, __b, __imm) __arm_vshrntq(__a, __b, __imm)
211 #define vmlaldavaq(__a, __b, __c) __arm_vmlaldavaq(__a, __b, __c)
212 #define vmlaldavaxq(__a, __b, __c) __arm_vmlaldavaxq(__a, __b, __c)
213 #define vmlsldavaq(__a, __b, __c) __arm_vmlsldavaq(__a, __b, __c)
214 #define vmlsldavaxq(__a, __b, __c) __arm_vmlsldavaxq(__a, __b, __c)
215 #define vmlaldavq_p(__a, __b, __p) __arm_vmlaldavq_p(__a, __b, __p)
216 #define vmlaldavxq_p(__a, __b, __p) __arm_vmlaldavxq_p(__a, __b, __p)
217 #define vmlsldavq_p(__a, __b, __p) __arm_vmlsldavq_p(__a, __b, __p)
218 #define vmlsldavxq_p(__a, __b, __p) __arm_vmlsldavxq_p(__a, __b, __p)
219 #define vmovlbq_m(__inactive, __a, __p) __arm_vmovlbq_m(__inactive, __a, __p)
220 #define vmovltq_m(__inactive, __a, __p) __arm_vmovltq_m(__inactive, __a, __p)
221 #define vmovnbq_m(__a, __b, __p) __arm_vmovnbq_m(__a, __b, __p)
222 #define vmovntq_m(__a, __b, __p) __arm_vmovntq_m(__a, __b, __p)
223 #define vqmovnbq_m(__a, __b, __p) __arm_vqmovnbq_m(__a, __b, __p)
224 #define vqmovntq_m(__a, __b, __p) __arm_vqmovntq_m(__a, __b, __p)
225 #define vrev32q_m(__inactive, __a, __p) __arm_vrev32q_m(__inactive, __a, __p)
226 #define vqrshruntq(__a, __b, __imm) __arm_vqrshruntq(__a, __b, __imm)
227 #define vqshrunbq(__a, __b, __imm) __arm_vqshrunbq(__a, __b, __imm)
228 #define vqshruntq(__a, __b, __imm) __arm_vqshruntq(__a, __b, __imm)
229 #define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p)
230 #define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p)
231 #define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p)
232 #define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p)
233 #define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p)
234 #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
235 #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
236 #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
237 #define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p)
238 #define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
239 #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
240 #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
241 #define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p)
242 #define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p)
243 #define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p)
244 #define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p)
245 #define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p)
246 #define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p)
247 #define vhsubq_m(__inactive, __a, __b, __p) __arm_vhsubq_m(__inactive, __a, __b, __p)
248 #define vmaxq_m(__inactive, __a, __b, __p) __arm_vmaxq_m(__inactive, __a, __b, __p)
249 #define vminq_m(__inactive, __a, __b, __p) __arm_vminq_m(__inactive, __a, __b, __p)
250 #define vmladavaq_p(__a, __b, __c, __p) __arm_vmladavaq_p(__a, __b, __c, __p)
251 #define vmladavaxq_p(__a, __b, __c, __p) __arm_vmladavaxq_p(__a, __b, __c, __p)
252 #define vmlaq_m(__a, __b, __c, __p) __arm_vmlaq_m(__a, __b, __c, __p)
253 #define vmlasq_m(__a, __b, __c, __p) __arm_vmlasq_m(__a, __b, __c, __p)
254 #define vmlsdavaq_p(__a, __b, __c, __p) __arm_vmlsdavaq_p(__a, __b, __c, __p)
255 #define vmlsdavaxq_p(__a, __b, __c, __p) __arm_vmlsdavaxq_p(__a, __b, __c, __p)
256 #define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p)
257 #define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p)
258 #define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p)
259 #define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p)
260 #define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p)
261 #define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p)
262 #define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p)
263 #define vqdmladhq_m(__inactive, __a, __b, __p) __arm_vqdmladhq_m(__inactive, __a, __b, __p)
264 #define vqdmlashq_m(__a, __b, __c, __p) __arm_vqdmlashq_m(__a, __b, __c, __p)
265 #define vqdmladhxq_m(__inactive, __a, __b, __p) __arm_vqdmladhxq_m(__inactive, __a, __b, __p)
266 #define vqdmlahq_m(__a, __b, __c, __p) __arm_vqdmlahq_m(__a, __b, __c, __p)
267 #define vqdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m(__inactive, __a, __b, __p)
268 #define vqdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m(__inactive, __a, __b, __p)
269 #define vqdmulhq_m(__inactive, __a, __b, __p) __arm_vqdmulhq_m(__inactive, __a, __b, __p)
270 #define vqrdmladhq_m(__inactive, __a, __b, __p) __arm_vqrdmladhq_m(__inactive, __a, __b, __p)
271 #define vqrdmladhxq_m(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m(__inactive, __a, __b, __p)
272 #define vqrdmlahq_m(__a, __b, __c, __p) __arm_vqrdmlahq_m(__a, __b, __c, __p)
273 #define vqrdmlashq_m(__a, __b, __c, __p) __arm_vqrdmlashq_m(__a, __b, __c, __p)
274 #define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p)
275 #define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p)
276 #define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p)
277 #define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p)
278 #define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p)
279 #define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p)
280 #define vqsubq_m(__inactive, __a, __b, __p) __arm_vqsubq_m(__inactive, __a, __b, __p)
281 #define vrhaddq_m(__inactive, __a, __b, __p) __arm_vrhaddq_m(__inactive, __a, __b, __p)
282 #define vrmulhq_m(__inactive, __a, __b, __p) __arm_vrmulhq_m(__inactive, __a, __b, __p)
283 #define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p)
284 #define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p)
285 #define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p)
286 #define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p)
287 #define vsliq_m(__a, __b, __imm, __p) __arm_vsliq_m(__a, __b, __imm, __p)
288 #define vmlaldavaq_p(__a, __b, __c, __p) __arm_vmlaldavaq_p(__a, __b, __c, __p)
289 #define vmlaldavaxq_p(__a, __b, __c, __p) __arm_vmlaldavaxq_p(__a, __b, __c, __p)
290 #define vmlsldavaq_p(__a, __b, __c, __p) __arm_vmlsldavaq_p(__a, __b, __c, __p)
291 #define vmlsldavaxq_p(__a, __b, __c, __p) __arm_vmlsldavaxq_p(__a, __b, __c, __p)
292 #define vmullbq_poly_m(__inactive, __a, __b, __p) __arm_vmullbq_poly_m(__inactive, __a, __b, __p)
293 #define vmulltq_poly_m(__inactive, __a, __b, __p) __arm_vmulltq_poly_m(__inactive, __a, __b, __p)
294 #define vqdmullbq_m(__inactive, __a, __b, __p) __arm_vqdmullbq_m(__inactive, __a, __b, __p)
295 #define vqdmulltq_m(__inactive, __a, __b, __p) __arm_vqdmulltq_m(__inactive, __a, __b, __p)
296 #define vqrshrnbq_m(__a, __b, __imm, __p) __arm_vqrshrnbq_m(__a, __b, __imm, __p)
297 #define vqrshrntq_m(__a, __b, __imm, __p) __arm_vqrshrntq_m(__a, __b, __imm, __p)
298 #define vqrshrunbq_m(__a, __b, __imm, __p) __arm_vqrshrunbq_m(__a, __b, __imm, __p)
299 #define vqrshruntq_m(__a, __b, __imm, __p) __arm_vqrshruntq_m(__a, __b, __imm, __p)
300 #define vqshrnbq_m(__a, __b, __imm, __p) __arm_vqshrnbq_m(__a, __b, __imm, __p)
301 #define vqshrntq_m(__a, __b, __imm, __p) __arm_vqshrntq_m(__a, __b, __imm, __p)
302 #define vqshrunbq_m(__a, __b, __imm, __p) __arm_vqshrunbq_m(__a, __b, __imm, __p)
303 #define vqshruntq_m(__a, __b, __imm, __p) __arm_vqshruntq_m(__a, __b, __imm, __p)
304 #define vrmlaldavhaq_p(__a, __b, __c, __p) __arm_vrmlaldavhaq_p(__a, __b, __c, __p)
305 #define vrmlaldavhaxq_p(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p(__a, __b, __c, __p)
306 #define vrmlsldavhaq_p(__a, __b, __c, __p) __arm_vrmlsldavhaq_p(__a, __b, __c, __p)
307 #define vrmlsldavhaxq_p(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p(__a, __b, __c, __p)
308 #define vrshrnbq_m(__a, __b, __imm, __p) __arm_vrshrnbq_m(__a, __b, __imm, __p)
309 #define vrshrntq_m(__a, __b, __imm, __p) __arm_vrshrntq_m(__a, __b, __imm, __p)
310 #define vshllbq_m(__inactive, __a, __imm, __p) __arm_vshllbq_m(__inactive, __a, __imm, __p)
311 #define vshlltq_m(__inactive, __a, __imm, __p) __arm_vshlltq_m(__inactive, __a, __imm, __p)
312 #define vshrnbq_m(__a, __b, __imm, __p) __arm_vshrnbq_m(__a, __b, __imm, __p)
313 #define vshrntq_m(__a, __b, __imm, __p) __arm_vshrntq_m(__a, __b, __imm, __p)
314 #define vstrbq_scatter_offset(__base, __offset, __value) __arm_vstrbq_scatter_offset(__base, __offset, __value)
315 #define vstrbq(__addr, __value) __arm_vstrbq(__addr, __value)
316 #define vstrwq_scatter_base(__addr, __offset, __value) __arm_vstrwq_scatter_base(__addr, __offset, __value)
317 #define vldrbq_gather_offset(__base, __offset) __arm_vldrbq_gather_offset(__base, __offset)
318 #define vstrbq_p(__addr, __value, __p) __arm_vstrbq_p(__addr, __value, __p)
319 #define vstrbq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p(__base, __offset, __value, __p)
320 #define vstrwq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p(__addr, __offset, __value, __p)
321 #define vldrbq_gather_offset_z(__base, __offset, __p) __arm_vldrbq_gather_offset_z(__base, __offset, __p)
322 #define vld1q(__base) __arm_vld1q(__base)
323 #define vldrhq_gather_offset(__base, __offset) __arm_vldrhq_gather_offset(__base, __offset)
324 #define vldrhq_gather_offset_z(__base, __offset, __p) __arm_vldrhq_gather_offset_z(__base, __offset, __p)
325 #define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset)
326 #define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p)
327 #define vldrdq_gather_offset(__base, __offset) __arm_vldrdq_gather_offset(__base, __offset)
328 #define vldrdq_gather_offset_z(__base, __offset, __p) __arm_vldrdq_gather_offset_z(__base, __offset, __p)
329 #define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset)
330 #define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p)
331 #define vldrwq_gather_offset(__base, __offset) __arm_vldrwq_gather_offset(__base, __offset)
332 #define vldrwq_gather_offset_z(__base, __offset, __p) __arm_vldrwq_gather_offset_z(__base, __offset, __p)
333 #define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset)
334 #define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p)
335 #define vst1q(__addr, __value) __arm_vst1q(__addr, __value)
336 #define vstrhq_scatter_offset(__base, __offset, __value) __arm_vstrhq_scatter_offset(__base, __offset, __value)
337 #define vstrhq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p(__base, __offset, __value, __p)
338 #define vstrhq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset(__base, __offset, __value)
339 #define vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p)
340 #define vstrhq(__addr, __value) __arm_vstrhq(__addr, __value)
341 #define vstrhq_p(__addr, __value, __p) __arm_vstrhq_p(__addr, __value, __p)
342 #define vstrwq(__addr, __value) __arm_vstrwq(__addr, __value)
343 #define vstrwq_p(__addr, __value, __p) __arm_vstrwq_p(__addr, __value, __p)
344 #define vstrdq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p(__addr, __offset, __value, __p)
345 #define vstrdq_scatter_base(__addr, __offset, __value) __arm_vstrdq_scatter_base(__addr, __offset, __value)
346 #define vstrdq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p(__base, __offset, __value, __p)
347 #define vstrdq_scatter_offset(__base, __offset, __value) __arm_vstrdq_scatter_offset(__base, __offset, __value)
348 #define vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p)
349 #define vstrdq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset(__base, __offset, __value)
350 #define vstrwq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p(__base, __offset, __value, __p)
351 #define vstrwq_scatter_offset(__base, __offset, __value) __arm_vstrwq_scatter_offset(__base, __offset, __value)
352 #define vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p)
353 #define vstrwq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset(__base, __offset, __value)
354 #define vuninitializedq(__v) __arm_vuninitializedq(__v)
355 #define vreinterpretq_s16(__a) __arm_vreinterpretq_s16(__a)
356 #define vreinterpretq_s32(__a) __arm_vreinterpretq_s32(__a)
357 #define vreinterpretq_s64(__a) __arm_vreinterpretq_s64(__a)
358 #define vreinterpretq_s8(__a) __arm_vreinterpretq_s8(__a)
359 #define vreinterpretq_u16(__a) __arm_vreinterpretq_u16(__a)
360 #define vreinterpretq_u32(__a) __arm_vreinterpretq_u32(__a)
361 #define vreinterpretq_u64(__a) __arm_vreinterpretq_u64(__a)
362 #define vreinterpretq_u8(__a) __arm_vreinterpretq_u8(__a)
363 #define vddupq_m(__inactive, __a, __imm, __p) __arm_vddupq_m(__inactive, __a, __imm, __p)
364 #define vddupq_u8(__a, __imm) __arm_vddupq_u8(__a, __imm)
365 #define vddupq_u32(__a, __imm) __arm_vddupq_u32(__a, __imm)
366 #define vddupq_u16(__a, __imm) __arm_vddupq_u16(__a, __imm)
367 #define vdwdupq_m(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m(__inactive, __a, __b, __imm, __p)
368 #define vdwdupq_u8(__a, __b, __imm) __arm_vdwdupq_u8(__a, __b, __imm)
369 #define vdwdupq_u32(__a, __b, __imm) __arm_vdwdupq_u32(__a, __b, __imm)
370 #define vdwdupq_u16(__a, __b, __imm) __arm_vdwdupq_u16(__a, __b, __imm)
371 #define vidupq_m(__inactive, __a, __imm, __p) __arm_vidupq_m(__inactive, __a, __imm, __p)
372 #define vidupq_u8(__a, __imm) __arm_vidupq_u8(__a, __imm)
373 #define vidupq_u32(__a, __imm) __arm_vidupq_u32(__a, __imm)
374 #define vidupq_u16(__a, __imm) __arm_vidupq_u16(__a, __imm)
375 #define viwdupq_m(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m(__inactive, __a, __b, __imm, __p)
376 #define viwdupq_u8(__a, __b, __imm) __arm_viwdupq_u8(__a, __b, __imm)
377 #define viwdupq_u32(__a, __b, __imm) __arm_viwdupq_u32(__a, __b, __imm)
378 #define viwdupq_u16(__a, __b, __imm) __arm_viwdupq_u16(__a, __b, __imm)
379 #define vstrdq_scatter_base_wb(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb(__addr, __offset, __value)
380 #define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p)
381 #define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p)
382 #define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value)
383 #define vddupq_x_u8(__a, __imm, __p) __arm_vddupq_x_u8(__a, __imm, __p)
384 #define vddupq_x_u16(__a, __imm, __p) __arm_vddupq_x_u16(__a, __imm, __p)
385 #define vddupq_x_u32(__a, __imm, __p) __arm_vddupq_x_u32(__a, __imm, __p)
386 #define vdwdupq_x_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_u8(__a, __b, __imm, __p)
387 #define vdwdupq_x_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_u16(__a, __b, __imm, __p)
388 #define vdwdupq_x_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_u32(__a, __b, __imm, __p)
389 #define vidupq_x_u8(__a, __imm, __p) __arm_vidupq_x_u8(__a, __imm, __p)
390 #define vidupq_x_u16(__a, __imm, __p) __arm_vidupq_x_u16(__a, __imm, __p)
391 #define vidupq_x_u32(__a, __imm, __p) __arm_vidupq_x_u32(__a, __imm, __p)
392 #define viwdupq_x_u8(__a, __b, __imm, __p) __arm_viwdupq_x_u8(__a, __b, __imm, __p)
393 #define viwdupq_x_u16(__a, __b, __imm, __p) __arm_viwdupq_x_u16(__a, __b, __imm, __p)
394 #define viwdupq_x_u32(__a, __b, __imm, __p) __arm_viwdupq_x_u32(__a, __b, __imm, __p)
395 #define vminq_x(__a, __b, __p) __arm_vminq_x(__a, __b, __p)
396 #define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p)
397 #define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p)
398 #define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p)
399 #define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p)
400 #define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p)
401 #define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p)
402 #define vnegq_x(__a, __p) __arm_vnegq_x(__a, __p)
403 #define vmulhq_x(__a, __b, __p) __arm_vmulhq_x(__a, __b, __p)
404 #define vmullbq_poly_x(__a, __b, __p) __arm_vmullbq_poly_x(__a, __b, __p)
405 #define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p)
406 #define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p)
407 #define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p)
408 #define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p)
409 #define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p)
410 #define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p)
411 #define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p)
412 #define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p)
413 #define vhcaddq_rot90_x(__a, __b, __p) __arm_vhcaddq_rot90_x(__a, __b, __p)
414 #define vhcaddq_rot270_x(__a, __b, __p) __arm_vhcaddq_rot270_x(__a, __b, __p)
415 #define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p)
416 #define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p)
417 #define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p)
418 #define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p)
419 #define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p)
420 #define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p)
421 #define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p)
422 #define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p)
423 #define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p)
424 #define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p)
425 #define vornq_x(__a, __b, __p) __arm_vornq_x(__a, __b, __p)
426 #define vorrq_x(__a, __b, __p) __arm_vorrq_x(__a, __b, __p)
427 #define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p)
428 #define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p)
429 #define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p)
430 #define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p)
431 #define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p)
432 #define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p)
433 #define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p)
434 #define vshlq_x_n(__a, __imm, __p) __arm_vshlq_x_n(__a, __imm, __p)
435 #define vrshrq_x(__a, __imm, __p) __arm_vrshrq_x(__a, __imm, __p)
436 #define vshrq_x(__a, __imm, __p) __arm_vshrq_x(__a, __imm, __p)
437 #define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out)
438 #define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p)
439 #define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry)
440 #define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p)
441 #define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out)
442 #define vsbciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m(__inactive, __a, __b, __carry_out, __p)
443 #define vsbcq(__a, __b, __carry) __arm_vsbcq(__a, __b, __carry)
444 #define vsbcq_m(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m(__inactive, __a, __b, __carry, __p)
445 #define vst1q_p(__addr, __value, __p) __arm_vst1q_p(__addr, __value, __p)
446 #define vst2q(__addr, __value) __arm_vst2q(__addr, __value)
447 #define vld1q_z(__base, __p) __arm_vld1q_z(__base, __p)
448 #define vld2q(__addr) __arm_vld2q(__addr)
449 #define vld4q(__addr) __arm_vld4q(__addr)
450 #define vsetq_lane(__a, __b, __idx) __arm_vsetq_lane(__a, __b, __idx)
451 #define vgetq_lane(__a, __idx) __arm_vgetq_lane(__a, __idx)
452 #define vshlcq_m(__a, __b, __imm, __p) __arm_vshlcq_m(__a, __b, __imm, __p)
453 #define vrndxq(__a) __arm_vrndxq(__a)
454 #define vrndq(__a) __arm_vrndq(__a)
455 #define vrndpq(__a) __arm_vrndpq(__a)
456 #define vrndnq(__a) __arm_vrndnq(__a)
457 #define vrndmq(__a) __arm_vrndmq(__a)
458 #define vrndaq(__a) __arm_vrndaq(__a)
459 #define vcvttq_f32(__a) __arm_vcvttq_f32(__a)
460 #define vcvtbq_f32(__a) __arm_vcvtbq_f32(__a)
461 #define vcvtq(__a) __arm_vcvtq(__a)
462 #define vcvtq_n(__a, __imm6) __arm_vcvtq_n(__a, __imm6)
463 #define vminnmvq(__a, __b) __arm_vminnmvq(__a, __b)
464 #define vminnmq(__a, __b) __arm_vminnmq(__a, __b)
465 #define vminnmavq(__a, __b) __arm_vminnmavq(__a, __b)
466 #define vminnmaq(__a, __b) __arm_vminnmaq(__a, __b)
467 #define vmaxnmvq(__a, __b) __arm_vmaxnmvq(__a, __b)
468 #define vmaxnmq(__a, __b) __arm_vmaxnmq(__a, __b)
469 #define vmaxnmavq(__a, __b) __arm_vmaxnmavq(__a, __b)
470 #define vmaxnmaq(__a, __b) __arm_vmaxnmaq(__a, __b)
471 #define vcmulq_rot90(__a, __b) __arm_vcmulq_rot90(__a, __b)
472 #define vcmulq_rot270(__a, __b) __arm_vcmulq_rot270(__a, __b)
473 #define vcmulq_rot180(__a, __b) __arm_vcmulq_rot180(__a, __b)
474 #define vcmulq(__a, __b) __arm_vcmulq(__a, __b)
475 #define vcvtaq_m(__inactive, __a, __p) __arm_vcvtaq_m(__inactive, __a, __p)
476 #define vcvtq_m(__inactive, __a, __p) __arm_vcvtq_m(__inactive, __a, __p)
477 #define vcvtbq_m(__a, __b, __p) __arm_vcvtbq_m(__a, __b, __p)
478 #define vcvttq_m(__a, __b, __p) __arm_vcvttq_m(__a, __b, __p)
479 #define vcmlaq(__a, __b, __c) __arm_vcmlaq(__a, __b, __c)
480 #define vcmlaq_rot180(__a, __b, __c) __arm_vcmlaq_rot180(__a, __b, __c)
481 #define vcmlaq_rot270(__a, __b, __c) __arm_vcmlaq_rot270(__a, __b, __c)
482 #define vcmlaq_rot90(__a, __b, __c) __arm_vcmlaq_rot90(__a, __b, __c)
483 #define vfmaq(__a, __b, __c) __arm_vfmaq(__a, __b, __c)
484 #define vfmasq(__a, __b, __c) __arm_vfmasq(__a, __b, __c)
485 #define vfmsq(__a, __b, __c) __arm_vfmsq(__a, __b, __c)
486 #define vcvtmq_m(__inactive, __a, __p) __arm_vcvtmq_m(__inactive, __a, __p)
487 #define vcvtnq_m(__inactive, __a, __p) __arm_vcvtnq_m(__inactive, __a, __p)
488 #define vcvtpq_m(__inactive, __a, __p) __arm_vcvtpq_m(__inactive, __a, __p)
489 #define vmaxnmaq_m(__a, __b, __p) __arm_vmaxnmaq_m(__a, __b, __p)
490 #define vmaxnmavq_p(__a, __b, __p) __arm_vmaxnmavq_p(__a, __b, __p)
491 #define vmaxnmvq_p(__a, __b, __p) __arm_vmaxnmvq_p(__a, __b, __p)
492 #define vminnmaq_m(__a, __b, __p) __arm_vminnmaq_m(__a, __b, __p)
493 #define vminnmavq_p(__a, __b, __p) __arm_vminnmavq_p(__a, __b, __p)
494 #define vminnmvq_p(__a, __b, __p) __arm_vminnmvq_p(__a, __b, __p)
495 #define vrndaq_m(__inactive, __a, __p) __arm_vrndaq_m(__inactive, __a, __p)
496 #define vrndmq_m(__inactive, __a, __p) __arm_vrndmq_m(__inactive, __a, __p)
497 #define vrndnq_m(__inactive, __a, __p) __arm_vrndnq_m(__inactive, __a, __p)
498 #define vrndpq_m(__inactive, __a, __p) __arm_vrndpq_m(__inactive, __a, __p)
499 #define vrndq_m(__inactive, __a, __p) __arm_vrndq_m(__inactive, __a, __p)
500 #define vrndxq_m(__inactive, __a, __p) __arm_vrndxq_m(__inactive, __a, __p)
501 #define vcvtq_m_n(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n(__inactive, __a, __imm6, __p)
502 #define vcmlaq_m(__a, __b, __c, __p) __arm_vcmlaq_m(__a, __b, __c, __p)
503 #define vcmlaq_rot180_m(__a, __b, __c, __p) __arm_vcmlaq_rot180_m(__a, __b, __c, __p)
504 #define vcmlaq_rot270_m(__a, __b, __c, __p) __arm_vcmlaq_rot270_m(__a, __b, __c, __p)
505 #define vcmlaq_rot90_m(__a, __b, __c, __p) __arm_vcmlaq_rot90_m(__a, __b, __c, __p)
506 #define vcmulq_m(__inactive, __a, __b, __p) __arm_vcmulq_m(__inactive, __a, __b, __p)
507 #define vcmulq_rot180_m(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m(__inactive, __a, __b, __p)
508 #define vcmulq_rot270_m(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m(__inactive, __a, __b, __p)
509 #define vcmulq_rot90_m(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m(__inactive, __a, __b, __p)
510 #define vfmaq_m(__a, __b, __c, __p) __arm_vfmaq_m(__a, __b, __c, __p)
511 #define vfmasq_m(__a, __b, __c, __p) __arm_vfmasq_m(__a, __b, __c, __p)
512 #define vfmsq_m(__a, __b, __c, __p) __arm_vfmsq_m(__a, __b, __c, __p)
513 #define vmaxnmq_m(__inactive, __a, __b, __p) __arm_vmaxnmq_m(__inactive, __a, __b, __p)
514 #define vminnmq_m(__inactive, __a, __b, __p) __arm_vminnmq_m(__inactive, __a, __b, __p)
515 #define vreinterpretq_f16(__a) __arm_vreinterpretq_f16(__a)
516 #define vreinterpretq_f32(__a) __arm_vreinterpretq_f32(__a)
517 #define vminnmq_x(__a, __b, __p) __arm_vminnmq_x(__a, __b, __p)
518 #define vmaxnmq_x(__a, __b, __p) __arm_vmaxnmq_x(__a, __b, __p)
519 #define vcmulq_x(__a, __b, __p) __arm_vcmulq_x(__a, __b, __p)
520 #define vcmulq_rot90_x(__a, __b, __p) __arm_vcmulq_rot90_x(__a, __b, __p)
521 #define vcmulq_rot180_x(__a, __b, __p) __arm_vcmulq_rot180_x(__a, __b, __p)
522 #define vcmulq_rot270_x(__a, __b, __p) __arm_vcmulq_rot270_x(__a, __b, __p)
523 #define vcvtq_x(__a, __p) __arm_vcvtq_x(__a, __p)
524 #define vcvtq_x_n(__a, __imm6, __p) __arm_vcvtq_x_n(__a, __imm6, __p)
525 #define vrndq_x(__a, __p) __arm_vrndq_x(__a, __p)
526 #define vrndnq_x(__a, __p) __arm_vrndnq_x(__a, __p)
527 #define vrndmq_x(__a, __p) __arm_vrndmq_x(__a, __p)
528 #define vrndpq_x(__a, __p) __arm_vrndpq_x(__a, __p)
529 #define vrndaq_x(__a, __p) __arm_vrndaq_x(__a, __p)
530 #define vrndxq_x(__a, __p) __arm_vrndxq_x(__a, __p)
531 
532 
533 #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value)
534 #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value)
535 #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value)
536 #define vst4q_u8( __addr, __value) __arm_vst4q_u8( __addr, __value)
537 #define vst4q_u16( __addr, __value) __arm_vst4q_u16( __addr, __value)
538 #define vst4q_u32( __addr, __value) __arm_vst4q_u32( __addr, __value)
539 #define vst4q_f16( __addr, __value) __arm_vst4q_f16( __addr, __value)
540 #define vst4q_f32( __addr, __value) __arm_vst4q_f32( __addr, __value)
541 #define vrndxq_f16(__a) __arm_vrndxq_f16(__a)
542 #define vrndxq_f32(__a) __arm_vrndxq_f32(__a)
543 #define vrndq_f16(__a) __arm_vrndq_f16(__a)
544 #define vrndq_f32(__a) __arm_vrndq_f32(__a)
545 #define vrndpq_f16(__a) __arm_vrndpq_f16(__a)
546 #define vrndpq_f32(__a) __arm_vrndpq_f32(__a)
547 #define vrndnq_f16(__a) __arm_vrndnq_f16(__a)
548 #define vrndnq_f32(__a) __arm_vrndnq_f32(__a)
549 #define vrndmq_f16(__a) __arm_vrndmq_f16(__a)
550 #define vrndmq_f32(__a) __arm_vrndmq_f32(__a)
551 #define vrndaq_f16(__a) __arm_vrndaq_f16(__a)
552 #define vrndaq_f32(__a) __arm_vrndaq_f32(__a)
553 #define vrev64q_f16(__a) __arm_vrev64q_f16(__a)
554 #define vrev64q_f32(__a) __arm_vrev64q_f32(__a)
555 #define vnegq_f16(__a) __arm_vnegq_f16(__a)
556 #define vnegq_f32(__a) __arm_vnegq_f32(__a)
557 #define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a)
558 #define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a)
559 #define vabsq_f16(__a) __arm_vabsq_f16(__a)
560 #define vabsq_f32(__a) __arm_vabsq_f32(__a)
561 #define vrev32q_f16(__a) __arm_vrev32q_f16(__a)
562 #define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a)
563 #define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a)
564 #define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a)
565 #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a)
566 #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a)
567 #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a)
568 #define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a)
569 #define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a)
570 #define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a)
571 #define vabsq_s8(__a) __arm_vabsq_s8(__a)
572 #define vabsq_s16(__a) __arm_vabsq_s16(__a)
573 #define vabsq_s32(__a) __arm_vabsq_s32(__a)
574 #define vclsq_s8(__a) __arm_vclsq_s8(__a)
575 #define vclsq_s16(__a) __arm_vclsq_s16(__a)
576 #define vclsq_s32(__a) __arm_vclsq_s32(__a)
577 #define vclzq_s8(__a) __arm_vclzq_s8(__a)
578 #define vclzq_s16(__a) __arm_vclzq_s16(__a)
579 #define vclzq_s32(__a) __arm_vclzq_s32(__a)
580 #define vnegq_s8(__a) __arm_vnegq_s8(__a)
581 #define vnegq_s16(__a) __arm_vnegq_s16(__a)
582 #define vnegq_s32(__a) __arm_vnegq_s32(__a)
583 #define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a)
584 #define vaddvq_s8(__a) __arm_vaddvq_s8(__a)
585 #define vaddvq_s16(__a) __arm_vaddvq_s16(__a)
586 #define vaddvq_s32(__a) __arm_vaddvq_s32(__a)
587 #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a)
588 #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a)
589 #define vmovltq_s8(__a) __arm_vmovltq_s8(__a)
590 #define vmovltq_s16(__a) __arm_vmovltq_s16(__a)
591 #define vmvnq_s8(__a) __arm_vmvnq_s8(__a)
592 #define vmvnq_s16(__a) __arm_vmvnq_s16(__a)
593 #define vmvnq_s32(__a) __arm_vmvnq_s32(__a)
594 #define vmvnq_n_s16( __imm) __arm_vmvnq_n_s16( __imm)
595 #define vmvnq_n_s32( __imm) __arm_vmvnq_n_s32( __imm)
596 #define vrev16q_s8(__a) __arm_vrev16q_s8(__a)
597 #define vrev32q_s8(__a) __arm_vrev32q_s8(__a)
598 #define vrev32q_s16(__a) __arm_vrev32q_s16(__a)
599 #define vrev64q_s8(__a) __arm_vrev64q_s8(__a)
600 #define vrev64q_s16(__a) __arm_vrev64q_s16(__a)
601 #define vrev64q_s32(__a) __arm_vrev64q_s32(__a)
602 #define vqabsq_s8(__a) __arm_vqabsq_s8(__a)
603 #define vqabsq_s16(__a) __arm_vqabsq_s16(__a)
604 #define vqabsq_s32(__a) __arm_vqabsq_s32(__a)
605 #define vqnegq_s8(__a) __arm_vqnegq_s8(__a)
606 #define vqnegq_s16(__a) __arm_vqnegq_s16(__a)
607 #define vqnegq_s32(__a) __arm_vqnegq_s32(__a)
608 #define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a)
609 #define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a)
610 #define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a)
611 #define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a)
612 #define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a)
613 #define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a)
614 #define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a)
615 #define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a)
616 #define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a)
617 #define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a)
618 #define vrev64q_u8(__a) __arm_vrev64q_u8(__a)
619 #define vrev64q_u16(__a) __arm_vrev64q_u16(__a)
620 #define vrev64q_u32(__a) __arm_vrev64q_u32(__a)
621 #define vmvnq_u8(__a) __arm_vmvnq_u8(__a)
622 #define vmvnq_u16(__a) __arm_vmvnq_u16(__a)
623 #define vmvnq_u32(__a) __arm_vmvnq_u32(__a)
624 #define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a)
625 #define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a)
626 #define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a)
627 #define vclzq_u8(__a) __arm_vclzq_u8(__a)
628 #define vclzq_u16(__a) __arm_vclzq_u16(__a)
629 #define vclzq_u32(__a) __arm_vclzq_u32(__a)
630 #define vaddvq_u8(__a) __arm_vaddvq_u8(__a)
631 #define vaddvq_u16(__a) __arm_vaddvq_u16(__a)
632 #define vaddvq_u32(__a) __arm_vaddvq_u32(__a)
633 #define vrev32q_u8(__a) __arm_vrev32q_u8(__a)
634 #define vrev32q_u16(__a) __arm_vrev32q_u16(__a)
635 #define vmovltq_u8(__a) __arm_vmovltq_u8(__a)
636 #define vmovltq_u16(__a) __arm_vmovltq_u16(__a)
637 #define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a)
638 #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a)
639 #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm)
640 #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm)
641 #define vrev16q_u8(__a) __arm_vrev16q_u8(__a)
642 #define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a)
643 #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a)
644 #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a)
645 #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a)
646 #define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a)
647 #define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a)
648 #define vcvtnq_u32_f32(__a) __arm_vcvtnq_u32_f32(__a)
649 #define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a)
650 #define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a)
651 #define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a)
652 #define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a)
653 #define vctp16q(__a) __arm_vctp16q(__a)
654 #define vctp32q(__a) __arm_vctp32q(__a)
655 #define vctp64q(__a) __arm_vctp64q(__a)
656 #define vctp8q(__a) __arm_vctp8q(__a)
657 #define vpnot(__a) __arm_vpnot(__a)
658 #define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b)
659 #define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b)
660 #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b)
661 #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b)
662 #define vcvtq_n_f16_s16(__a,  __imm6) __arm_vcvtq_n_f16_s16(__a,  __imm6)
663 #define vcvtq_n_f32_s32(__a,  __imm6) __arm_vcvtq_n_f32_s32(__a,  __imm6)
664 #define vcvtq_n_f16_u16(__a,  __imm6) __arm_vcvtq_n_f16_u16(__a,  __imm6)
665 #define vcvtq_n_f32_u32(__a,  __imm6) __arm_vcvtq_n_f32_u32(__a,  __imm6)
666 #define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b)
667 #define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b)
668 #define vcvtq_n_s16_f16(__a,  __imm6) __arm_vcvtq_n_s16_f16(__a,  __imm6)
669 #define vcvtq_n_s32_f32(__a,  __imm6) __arm_vcvtq_n_s32_f32(__a,  __imm6)
670 #define vcvtq_n_u16_f16(__a,  __imm6) __arm_vcvtq_n_u16_f16(__a,  __imm6)
671 #define vcvtq_n_u32_f32(__a,  __imm6) __arm_vcvtq_n_u32_f32(__a,  __imm6)
672 #define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b)
673 #define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b)
674 #define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b)
675 #define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b)
676 #define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b)
677 #define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b)
678 #define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b)
679 #define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b)
680 #define vshrq_n_s8(__a,  __imm) __arm_vshrq_n_s8(__a,  __imm)
681 #define vshrq_n_s16(__a,  __imm) __arm_vshrq_n_s16(__a,  __imm)
682 #define vshrq_n_s32(__a,  __imm) __arm_vshrq_n_s32(__a,  __imm)
683 #define vshrq_n_u8(__a,  __imm) __arm_vshrq_n_u8(__a,  __imm)
684 #define vshrq_n_u16(__a,  __imm) __arm_vshrq_n_u16(__a,  __imm)
685 #define vshrq_n_u32(__a,  __imm) __arm_vshrq_n_u32(__a,  __imm)
686 #define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p)
687 #define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p)
688 #define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b)
689 #define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b)
690 #define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b)
691 #define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b)
692 #define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b)
693 #define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b)
694 #define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b)
695 #define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b)
696 #define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b)
697 #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b)
698 #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b)
699 #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b)
700 #define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b)
701 #define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b)
702 #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b)
703 #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b)
704 #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b)
705 #define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b)
706 #define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b)
707 #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b)
708 #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b)
709 #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b)
710 #define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b)
711 #define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b)
712 #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b)
713 #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b)
714 #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b)
715 #define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b)
716 #define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b)
717 #define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b)
718 #define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b)
719 #define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b)
720 #define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b)
721 #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
722 #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
723 #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
724 #define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
725 #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
726 #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
727 #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
728 #define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b)
729 #define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b)
730 #define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b)
731 #define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b)
732 #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
733 #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
734 #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
735 #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
736 #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
737 #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
738 #define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b)
739 #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
740 #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
741 #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
742 #define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
743 #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
744 #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
745 #define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
746 #define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
747 #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
748 #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
749 #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
750 #define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b)
751 #define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b)
752 #define vshlq_n_u8(__a,  __imm) __arm_vshlq_n_u8(__a,  __imm)
753 #define vrshrq_n_u8(__a,  __imm) __arm_vrshrq_n_u8(__a,  __imm)
754 #define vqshlq_n_u8(__a,  __imm) __arm_vqshlq_n_u8(__a,  __imm)
755 #define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b)
756 #define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b)
757 #define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b)
758 #define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b)
759 #define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b)
760 #define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b)
761 #define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b)
762 #define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b)
763 #define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b)
764 #define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b)
765 #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b)
766 #define vqshluq_n_s8(__a,  __imm) __arm_vqshluq_n_s8(__a,  __imm)
767 #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
768 #define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b)
769 #define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b)
770 #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
771 #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
772 #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
773 #define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b)
774 #define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b)
775 #define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b)
776 #define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b)
777 #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
778 #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
779 #define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
780 #define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
781 #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
782 #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
783 #define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b)
784 #define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b)
785 #define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b)
786 #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b)
787 #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b)
788 #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
789 #define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b)
790 #define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b)
791 #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b)
792 #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b)
793 #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b)
794 #define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b)
795 #define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b)
796 #define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b)
797 #define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b)
798 #define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b)
799 #define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b)
800 #define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b)
801 #define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b)
802 #define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b)
803 #define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b)
804 #define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b)
805 #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
806 #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
807 #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
808 #define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
809 #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
810 #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
811 #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
812 #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
813 #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
814 #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
815 #define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b)
816 #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
817 #define vshlq_n_s8(__a,  __imm) __arm_vshlq_n_s8(__a,  __imm)
818 #define vrshrq_n_s8(__a,  __imm) __arm_vrshrq_n_s8(__a,  __imm)
819 #define vqshlq_n_s8(__a,  __imm) __arm_vqshlq_n_s8(__a,  __imm)
820 #define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b)
821 #define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b)
822 #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b)
823 #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b)
824 #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b)
825 #define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b)
826 #define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b)
827 #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b)
828 #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b)
829 #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b)
830 #define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b)
831 #define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b)
832 #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b)
833 #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b)
834 #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b)
835 #define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b)
836 #define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b)
837 #define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b)
838 #define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b)
839 #define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b)
840 #define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b)
841 #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
842 #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
843 #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
844 #define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
845 #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
846 #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
847 #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
848 #define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b)
849 #define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b)
850 #define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b)
851 #define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b)
852 #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
853 #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
854 #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
855 #define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
856 #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
857 #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
858 #define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
859 #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
860 #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
861 #define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
862 #define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
863 #define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
864 #define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
865 #define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
866 #define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
867 #define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
868 #define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
869 #define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
870 #define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b)
871 #define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b)
872 #define vshlq_n_u16(__a,  __imm) __arm_vshlq_n_u16(__a,  __imm)
873 #define vrshrq_n_u16(__a,  __imm) __arm_vrshrq_n_u16(__a,  __imm)
874 #define vqshlq_n_u16(__a,  __imm) __arm_vqshlq_n_u16(__a,  __imm)
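/* Illustrative sketch only: a predicate produced by one of the vcmp*
   macros above can gate the cross-vector reduction vaddvq_p_u16.
   Hypothetical helper; semantics assumed per ACLE.
     uint32_t __sum_above (uint16x8_t v, uint16x8_t limit)
     {
       mve_pred16_t p = vcmphiq_u16 (v, limit);   // lanes where v > limit (unsigned)
       return vaddvq_p_u16 (v, p);                // sum only the active lanes
     }  */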
875 #define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b)
876 #define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b)
877 #define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b)
878 #define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b)
879 #define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b)
880 #define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b)
881 #define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b)
882 #define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b)
883 #define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b)
884 #define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b)
885 #define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
886 #define vqshluq_n_s16(__a,  __imm) __arm_vqshluq_n_s16(__a,  __imm)
887 #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
888 #define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
889 #define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
890 #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
891 #define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
892 #define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
893 #define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b)
894 #define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b)
895 #define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b)
896 #define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b)
897 #define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
898 #define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
899 #define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
900 #define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
901 #define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
902 #define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
903 #define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b)
904 #define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b)
905 #define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b)
906 #define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
907 #define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
908 #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
909 #define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
910 #define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
911 #define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
912 #define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
913 #define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
914 #define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b)
915 #define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b)
916 #define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b)
917 #define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b)
918 #define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b)
919 #define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b)
920 #define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b)
921 #define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b)
922 #define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b)
923 #define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b)
924 #define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b)
925 #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
926 #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
927 #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
928 #define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
929 #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
930 #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
931 #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
932 #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
933 #define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
934 #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
935 #define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
936 #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
937 #define vshlq_n_s16(__a,  __imm) __arm_vshlq_n_s16(__a,  __imm)
938 #define vrshrq_n_s16(__a,  __imm) __arm_vrshrq_n_s16(__a,  __imm)
939 #define vqshlq_n_s16(__a,  __imm) __arm_vqshlq_n_s16(__a,  __imm)
940 #define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
941 #define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
942 #define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
943 #define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
944 #define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
945 #define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
946 #define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
947 #define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
948 #define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
949 #define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
950 #define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
951 #define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
952 #define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
953 #define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
954 #define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
955 #define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
956 #define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
957 #define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
958 #define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
959 #define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
960 #define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
961 #define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
962 #define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
963 #define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
964 #define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
965 #define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
966 #define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
967 #define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
968 #define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
969 #define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
970 #define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
971 #define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
972 #define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
973 #define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
974 #define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
975 #define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
976 #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
977 #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
978 #define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
979 #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
980 #define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
981 #define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
982 #define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
983 #define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
984 #define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
985 #define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
986 #define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
987 #define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
988 #define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
989 #define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
990 #define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
991 #define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
992 #define vshlq_n_u32(__a,  __imm) __arm_vshlq_n_u32(__a,  __imm)
993 #define vrshrq_n_u32(__a,  __imm) __arm_vrshrq_n_u32(__a,  __imm)
994 #define vqshlq_n_u32(__a,  __imm) __arm_vqshlq_n_u32(__a,  __imm)
995 #define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
996 #define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
997 #define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
998 #define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
999 #define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
1000 #define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
1001 #define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
1002 #define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
1003 #define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
1004 #define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
1005 #define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
1006 #define vqshluq_n_s32(__a,  __imm) __arm_vqshluq_n_s32(__a,  __imm)
1007 #define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
1008 #define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
1009 #define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
1010 #define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
1011 #define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
1012 #define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
1013 #define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
1014 #define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
1015 #define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
1016 #define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
1017 #define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
1018 #define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
1019 #define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
1020 #define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
1021 #define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
1022 #define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
1023 #define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
1024 #define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
1025 #define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
1026 #define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
1027 #define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
1028 #define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
1029 #define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
1030 #define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
1031 #define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
1032 #define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
1033 #define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
1034 #define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
1035 #define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
1036 #define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
1037 #define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
1038 #define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
1039 #define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
1040 #define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
1041 #define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
1042 #define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
1043 #define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
1044 #define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
1045 #define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
1046 #define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
1047 #define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
1048 #define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
1049 #define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
1050 #define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
1051 #define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
1052 #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
1053 #define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
1054 #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
1055 #define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
1056 #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
1057 #define vshlq_n_s32(__a,  __imm) __arm_vshlq_n_s32(__a,  __imm)
1058 #define vrshrq_n_s32(__a,  __imm) __arm_vrshrq_n_s32(__a,  __imm)
1059 #define vqshlq_n_s32(__a,  __imm) __arm_vqshlq_n_s32(__a,  __imm)
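/* Illustrative sketch only: a fixed-point style use of the s32 forms
   above.  Hypothetical helper; the Q31 interpretation of the operands is
   an assumption, semantics per ACLE.
     int32_t __dot_scaled (int32x4_t a, int32x4_t b, int32_t gain_q31)
     {
       int32x4_t s = vqdmulhq_n_s32 (a, gain_q31);  // saturating doubling multiply, high half
       return vmladavq_s32 (s, b);                  // multiply lanes pairwise, sum across the vector
     }  */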
1060 #define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b)
1061 #define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b)
1062 #define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b)
1063 #define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b)
1064 #define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b)
1065 #define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b)
1066 #define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b)
1067 #define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b)
1068 #define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b)
1069 #define vshlltq_n_u8(__a,  __imm) __arm_vshlltq_n_u8(__a,  __imm)
1070 #define vshllbq_n_u8(__a,  __imm) __arm_vshllbq_n_u8(__a,  __imm)
1071 #define vorrq_n_u16(__a,  __imm) __arm_vorrq_n_u16(__a,  __imm)
1072 #define vbicq_n_u16(__a,  __imm) __arm_vbicq_n_u16(__a,  __imm)
1073 #define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b)
1074 #define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b)
1075 #define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b)
1076 #define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b)
1077 #define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b)
1078 #define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b)
1079 #define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b)
1080 #define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b)
1081 #define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b)
1082 #define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
1083 #define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
1084 #define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
1085 #define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
1086 #define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
1087 #define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
1088 #define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
1089 #define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b)
1090 #define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b)
1091 #define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
1092 #define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
1093 #define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
1094 #define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
1095 #define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
1096 #define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
1097 #define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
1098 #define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
1099 #define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b)
1100 #define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b)
1101 #define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b)
1102 #define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b)
1103 #define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b)
1104 #define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b)
1105 #define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b)
1106 #define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b)
1107 #define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
1108 #define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
1109 #define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
1110 #define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
1111 #define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
1112 #define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
1113 #define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
1114 #define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b)
1115 #define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
1116 #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
1117 #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
1118 #define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
1119 #define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
1120 #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
1121 #define vshlltq_n_s8(__a,  __imm) __arm_vshlltq_n_s8(__a,  __imm)
1122 #define vshllbq_n_s8(__a,  __imm) __arm_vshllbq_n_s8(__a,  __imm)
1123 #define vorrq_n_s16(__a,  __imm) __arm_vorrq_n_s16(__a,  __imm)
1124 #define vbicq_n_s16(__a,  __imm) __arm_vbicq_n_s16(__a,  __imm)
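/* Illustrative sketch only: the _f16 forms above operate on float16x8_t,
   and the *nm* variants follow the IEEE minNum/maxNum rules.  Hypothetical
   helper; semantics assumed per ACLE.
     float16_t __peak_abs_diff (float16x8_t a, float16x8_t b)
     {
       float16x8_t d = vabdq_f16 (a, b);            // per-lane |a - b|
       return vmaxnmvq_f16 ((float16_t) 0.0f, d);   // NaN-aware maximum across lanes
     }  */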
1125 #define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b)
1126 #define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b)
1127 #define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b)
1128 #define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b)
1129 #define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b)
1130 #define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b)
1131 #define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b)
1132 #define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b)
1133 #define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b)
1134 #define vshlltq_n_u16(__a,  __imm) __arm_vshlltq_n_u16(__a,  __imm)
1135 #define vshllbq_n_u16(__a,  __imm) __arm_vshllbq_n_u16(__a,  __imm)
1136 #define vorrq_n_u32(__a,  __imm) __arm_vorrq_n_u32(__a,  __imm)
1137 #define vbicq_n_u32(__a,  __imm) __arm_vbicq_n_u32(__a,  __imm)
1138 #define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b)
1139 #define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b)
1140 #define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b)
1141 #define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b)
1142 #define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b)
1143 #define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b)
1144 #define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b)
1145 #define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b)
1146 #define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b)
1147 #define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
1148 #define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
1149 #define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
1150 #define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
1151 #define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
1152 #define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
1153 #define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
1154 #define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b)
1155 #define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b)
1156 #define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
1157 #define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
1158 #define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
1159 #define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
1160 #define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
1161 #define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
1162 #define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
1163 #define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
1164 #define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b)
1165 #define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b)
1166 #define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b)
1167 #define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b)
1168 #define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b)
1169 #define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b)
1170 #define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b)
1171 #define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b)
1172 #define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
1173 #define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
1174 #define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
1175 #define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
1176 #define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
1177 #define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
1178 #define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
1179 #define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b)
1180 #define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
1181 #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
1182 #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
1183 #define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
1184 #define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
1185 #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
1186 #define vshlltq_n_s16(__a,  __imm) __arm_vshlltq_n_s16(__a,  __imm)
1187 #define vshllbq_n_s16(__a,  __imm) __arm_vshllbq_n_s16(__a,  __imm)
1188 #define vorrq_n_s32(__a,  __imm) __arm_vorrq_n_s32(__a,  __imm)
1189 #define vbicq_n_s32(__a,  __imm) __arm_vbicq_n_s32(__a,  __imm)
1190 #define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b)
1191 #define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p)
1192 #define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p)
1193 #define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p)
1194 #define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p)
1195 #define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b)
1196 #define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b)
1197 #define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b)
1198 #define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b)
1199 #define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b)
1200 #define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
1201 #define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
1202 #define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
1203 #define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
1204 #define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
1205 #define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
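/* Illustrative sketch only: vabavq_* accumulates the sum of absolute
   lane differences into a 32-bit scalar, the usual building block for a
   sum-of-absolute-differences loop.  Hypothetical helper.
     uint32_t __sad_step (uint32_t acc, int8x16_t a, int8x16_t b)
     {
       return vabavq_s8 (acc, a, b);   // acc + sum over lanes of |a[i] - b[i]|
     }  */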
1206 #define vbicq_m_n_s16(__a,  __imm, __p) __arm_vbicq_m_n_s16(__a,  __imm, __p)
1207 #define vbicq_m_n_s32(__a,  __imm, __p) __arm_vbicq_m_n_s32(__a,  __imm, __p)
1208 #define vbicq_m_n_u16(__a,  __imm, __p) __arm_vbicq_m_n_u16(__a,  __imm, __p)
1209 #define vbicq_m_n_u32(__a,  __imm, __p) __arm_vbicq_m_n_u32(__a,  __imm, __p)
1210 #define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
1211 #define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
1212 #define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
1213 #define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
1214 #define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
1215 #define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
1216 #define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
1217 #define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
1218 #define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
1219 #define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
1220 #define vqrshrnbq_n_s16(__a, __b,  __imm) __arm_vqrshrnbq_n_s16(__a, __b,  __imm)
1221 #define vqrshrnbq_n_u16(__a, __b,  __imm) __arm_vqrshrnbq_n_u16(__a, __b,  __imm)
1222 #define vqrshrnbq_n_s32(__a, __b,  __imm) __arm_vqrshrnbq_n_s32(__a, __b,  __imm)
1223 #define vqrshrnbq_n_u32(__a, __b,  __imm) __arm_vqrshrnbq_n_u32(__a, __b,  __imm)
1224 #define vqrshrunbq_n_s16(__a, __b,  __imm) __arm_vqrshrunbq_n_s16(__a, __b,  __imm)
1225 #define vqrshrunbq_n_s32(__a, __b,  __imm) __arm_vqrshrunbq_n_s32(__a, __b,  __imm)
1226 #define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
1227 #define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
1228 #define vshlcq_s8(__a,  __b,  __imm) __arm_vshlcq_s8(__a,  __b,  __imm)
1229 #define vshlcq_u8(__a,  __b,  __imm) __arm_vshlcq_u8(__a,  __b,  __imm)
1230 #define vshlcq_s16(__a,  __b,  __imm) __arm_vshlcq_s16(__a,  __b,  __imm)
1231 #define vshlcq_u16(__a,  __b,  __imm) __arm_vshlcq_u16(__a,  __b,  __imm)
1232 #define vshlcq_s32(__a,  __b,  __imm) __arm_vshlcq_s32(__a,  __b,  __imm)
1233 #define vshlcq_u32(__a,  __b,  __imm) __arm_vshlcq_u32(__a,  __b,  __imm)
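/* Illustrative sketch only: vshlcq_* shifts the whole 128-bit vector left
   by __imm bits, filling the low bits from *__b and writing the bits
   shifted out of the top back to *__b, so a scalar carry can be chained
   across calls.  The exact carry behaviour is assumed per ACLE.
     uint32_t carry = 0;
     v0 = vshlcq_u32 (v0, &carry, 4);   // carry receives the 4 bits shifted out of v0
     v1 = vshlcq_u32 (v1, &carry, 4);   // those bits are shifted into the bottom of v1
*/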
1234 #define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
1235 #define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
1236 #define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
1237 #define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p)
1238 #define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p)
1239 #define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p)
1240 #define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p)
1241 #define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c)
1242 #define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c)
1243 #define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p)
1244 #define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c)
1245 #define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p)
1246 #define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p)
1247 #define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p)
1248 #define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p)
1249 #define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p)
1250 #define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p)
1251 #define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p)
1252 #define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p)
1253 #define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p)
1254 #define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p)
1255 #define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p)
1256 #define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p)
1257 #define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p)
1258 #define vsriq_n_u8(__a, __b,  __imm) __arm_vsriq_n_u8(__a, __b,  __imm)
1259 #define vsliq_n_u8(__a, __b,  __imm) __arm_vsliq_n_u8(__a, __b,  __imm)
1260 #define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
1261 #define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
1262 #define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
1263 #define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
1264 #define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
1265 #define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
1266 #define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
1267 #define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p)
1268 #define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p)
1269 #define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p)
1270 #define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p)
1271 #define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p)
1272 #define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p)
1273 #define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p)
1274 #define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p)
1275 #define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p)
1276 #define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p)
1277 #define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p)
1278 #define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
1279 #define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
1280 #define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
1281 #define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
1282 #define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
1283 #define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
1284 #define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
1285 #define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
1286 #define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
1287 #define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
1288 #define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p)
1289 #define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p)
1290 #define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p)
1291 #define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p)
1292 #define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p)
1293 #define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p)
1294 #define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p)
1295 #define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p)
1296 #define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p)
1297 #define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p)
1298 #define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p)
1299 #define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p)
1300 #define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b)
1301 #define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b)
1302 #define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c)
1303 #define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c)
1304 #define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b)
1305 #define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b)
1306 #define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b)
1307 #define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b)
1308 #define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c)
1309 #define vqdmlashq_n_s8(__a, __b, __c) __arm_vqdmlashq_n_s8(__a, __b, __c)
1310 #define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b)
1311 #define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b)
1312 #define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c)
1313 #define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c)
1314 #define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c)
1315 #define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c)
1316 #define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c)
1317 #define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c)
1318 #define vsriq_n_s8(__a, __b,  __imm) __arm_vsriq_n_s8(__a, __b,  __imm)
1319 #define vsliq_n_s8(__a, __b,  __imm) __arm_vsliq_n_s8(__a, __b,  __imm)
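/* Illustrative sketch only: the _m ("merging") forms above take an
   __inactive vector whose lanes are preserved wherever the predicate bit
   is clear, and vpselq_* selects between two vectors under a predicate.
   Hypothetical helpers; semantics assumed per ACLE.
     int8x16_t __abs_where (int8x16_t v, mve_pred16_t p)
     {
       return vabsq_m_s8 (v, v, p);   // |v| in active lanes, v unchanged elsewhere
     }
     int8x16_t __select (int8x16_t a, int8x16_t b, mve_pred16_t p)
     {
       return vpselq_s8 (a, b, p);    // a where p is set, b where it is clear
     }  */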
1320 #define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p)
1321 #define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p)
1322 #define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p)
1323 #define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p)
1324 #define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c)
1325 #define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c)
1326 #define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p)
1327 #define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c)
1328 #define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p)
1329 #define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p)
1330 #define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p)
1331 #define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p)
1332 #define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p)
1333 #define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p)
1334 #define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p)
1335 #define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p)
1336 #define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p)
1337 #define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p)
1338 #define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p)
1339 #define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p)
1340 #define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p)
1341 #define vsriq_n_u16(__a, __b,  __imm) __arm_vsriq_n_u16(__a, __b,  __imm)
1342 #define vsliq_n_u16(__a, __b,  __imm) __arm_vsliq_n_u16(__a, __b,  __imm)
1343 #define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p)
1344 #define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p)
1345 #define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p)
1346 #define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p)
1347 #define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p)
1348 #define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
1349 #define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p)
1350 #define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p)
1351 #define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p)
1352 #define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p)
1353 #define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p)
1354 #define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p)
1355 #define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p)
1356 #define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p)
1357 #define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p)
1358 #define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p)
1359 #define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p)
1360 #define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p)
1361 #define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p)
1362 #define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p)
1363 #define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
1364 #define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p)
1365 #define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p)
1366 #define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p)
1367 #define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p)
1368 #define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p)
1369 #define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p)
1370 #define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p)
1371 #define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p)
1372 #define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p)
1373 #define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p)
1374 #define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p)
1375 #define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p)
1376 #define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p)
1377 #define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p)
1378 #define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p)
1379 #define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p)
1380 #define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p)
1381 #define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p)
1382 #define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p)
1383 #define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b)
1384 #define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b)
1385 #define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c)
1386 #define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c)
1387 #define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b)
1388 #define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b)
1389 #define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b)
1390 #define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b)
1391 #define vqdmlashq_n_s16(__a, __b, __c) __arm_vqdmlashq_n_s16(__a, __b, __c)
1392 #define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c)
1393 #define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b)
1394 #define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b)
1395 #define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c)
1396 #define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c)
1397 #define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c)
1398 #define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c)
1399 #define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c)
1400 #define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c)
1401 #define vsriq_n_s16(__a, __b,  __imm) __arm_vsriq_n_s16(__a, __b,  __imm)
1402 #define vsliq_n_s16(__a, __b,  __imm) __arm_vsliq_n_s16(__a, __b,  __imm)
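/* Illustrative sketch only: vmlaq_n_* multiplies each lane by a scalar and
   adds the result to the first operand, while vmlasq_n_* multiplies the two
   vectors and adds the scalar.  Hypothetical helper; operand order assumed
   per ACLE.
     int16x8_t __scale_acc (int16x8_t acc, int16x8_t x, int16_t scale)
     {
       return vmlaq_n_s16 (acc, x, scale);   // per lane: acc + x * scale
     }  */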
1403 #define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p)
1404 #define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p)
1405 #define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p)
1406 #define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p)
1407 #define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c)
1408 #define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c)
1409 #define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p)
1410 #define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c)
1411 #define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p)
1412 #define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p)
1413 #define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p)
1414 #define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p)
1415 #define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p)
1416 #define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p)
1417 #define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p)
1418 #define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p)
1419 #define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p)
1420 #define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p)
1421 #define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p)
1422 #define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p)
1423 #define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p)
1424 #define vsriq_n_u32(__a, __b,  __imm) __arm_vsriq_n_u32(__a, __b,  __imm)
1425 #define vsliq_n_u32(__a, __b,  __imm) __arm_vsliq_n_u32(__a, __b,  __imm)
1426 #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p)
1427 #define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p)
1428 #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p)
1429 #define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p)
1430 #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p)
1431 #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p)
1432 #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p)
1433 #define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p)
1434 #define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p)
1435 #define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p)
1436 #define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p)
1437 #define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p)
1438 #define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p)
1439 #define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p)
1440 #define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p)
1441 #define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p)
1442 #define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p)
1443 #define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p)
1444 #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p)
1445 #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p)
1446 #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p)
1447 #define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p)
1448 #define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p)
1449 #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p)
1450 #define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p)
1451 #define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p)
1452 #define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p)
1453 #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p)
1454 #define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p)
1455 #define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p)
1456 #define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p)
1457 #define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p)
1458 #define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p)
1459 #define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p)
1460 #define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p)
1461 #define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p)
1462 #define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p)
1463 #define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p)
1464 #define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p)
1465 #define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p)
1466 #define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b)
1467 #define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b)
1468 #define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c)
1469 #define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c)
1470 #define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b)
1471 #define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b)
1472 #define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b)
1473 #define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b)
1474 #define vqdmlashq_n_s32(__a, __b, __c) __arm_vqdmlashq_n_s32(__a, __b, __c)
1475 #define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c)
1476 #define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b)
1477 #define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b)
1478 #define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c)
1479 #define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c)
1480 #define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c)
1481 #define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c)
1482 #define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c)
1483 #define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c)
1484 #define vsriq_n_s32(__a, __b,  __imm) __arm_vsriq_n_s32(__a, __b,  __imm)
1485 #define vsliq_n_s32(__a, __b,  __imm) __arm_vsliq_n_s32(__a, __b,  __imm)
1486 #define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p)
1487 #define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p)
1488 #define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c)
1489 #define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c)
1490 #define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c)
1491 #define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p)
1492 #define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p)
1493 #define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p)
1494 #define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p)
1495 #define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p)
1496 #define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p)
1497 #define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p)
1498 #define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p)
1499 #define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p)
1500 #define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p)
1501 #define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p)
1502 #define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p)
1503 #define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p)
1504 #define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p)
1505 #define vmvnq_m_n_s16(__inactive,  __imm, __p) __arm_vmvnq_m_n_s16(__inactive,  __imm, __p)
1506 #define vorrq_m_n_s16(__a,  __imm, __p) __arm_vorrq_m_n_s16(__a,  __imm, __p)
1507 #define vqrshrntq_n_s16(__a, __b,  __imm) __arm_vqrshrntq_n_s16(__a, __b,  __imm)
1508 #define vqshrnbq_n_s16(__a, __b,  __imm) __arm_vqshrnbq_n_s16(__a, __b,  __imm)
1509 #define vqshrntq_n_s16(__a, __b,  __imm) __arm_vqshrntq_n_s16(__a, __b,  __imm)
1510 #define vrshrnbq_n_s16(__a, __b,  __imm) __arm_vrshrnbq_n_s16(__a, __b,  __imm)
1511 #define vrshrntq_n_s16(__a, __b,  __imm) __arm_vrshrntq_n_s16(__a, __b,  __imm)
1512 #define vshrnbq_n_s16(__a, __b,  __imm) __arm_vshrnbq_n_s16(__a, __b,  __imm)
1513 #define vshrntq_n_s16(__a, __b,  __imm) __arm_vshrntq_n_s16(__a, __b,  __imm)
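/* Illustrative sketch only: the b/t narrowing shifts write the even
   ("bottom") or odd ("top") lanes of the destination respectively, so a
   pair of them packs two int16x8_t vectors into one int8x16_t.
   Hypothetical helper; lane placement assumed per ACLE, and vdupq_n_s8 is
   provided elsewhere in this header.
     int8x16_t __pack_high_bytes (int16x8_t lo, int16x8_t hi)
     {
       int8x16_t r = vdupq_n_s8 (0);
       r = vshrnbq_n_s16 (r, lo, 8);   // lo >> 8 into the even byte lanes
       r = vshrntq_n_s16 (r, hi, 8);   // hi >> 8 into the odd byte lanes
       return r;
     }  */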
1514 #define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c)
1515 #define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c)
1516 #define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c)
1517 #define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c)
1518 #define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c)
1519 #define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c)
1520 #define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c)
1521 #define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c)
1522 #define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c)
1523 #define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c)
1524 #define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c)
1525 #define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c)
1526 #define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p)
1527 #define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p)
1528 #define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p)
1529 #define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p)
1530 #define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p)
1531 #define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p)
1532 #define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p)
1533 #define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p)
1534 #define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p)
1535 #define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p)
1536 #define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p)
1537 #define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p)
1538 #define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p)
1539 #define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p)
1540 #define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p)
1541 #define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p)
1542 #define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p)
1543 #define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p)
1544 #define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p)
1545 #define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p)
1546 #define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p)
1547 #define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p)
1548 #define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p)
1549 #define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p)
1550 #define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p)
1551 #define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p)
1552 #define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p)
1553 #define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p)
1554 #define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p)
1555 #define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p)
1556 #define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p)
1557 #define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p)
1558 #define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p)
1559 #define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p)
1560 #define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p)
1561 #define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p)
1562 #define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p)
1563 #define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p)
1564 #define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p)
1565 #define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p)
1566 #define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p)
1567 #define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p)
1568 #define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p)
1569 #define vmvnq_m_n_u16(__inactive,  __imm, __p) __arm_vmvnq_m_n_u16(__inactive,  __imm, __p)
1570 #define vorrq_m_n_u16(__a,  __imm, __p) __arm_vorrq_m_n_u16(__a,  __imm, __p)
1571 #define vqrshruntq_n_s16(__a, __b,  __imm) __arm_vqrshruntq_n_s16(__a, __b,  __imm)
1572 #define vqshrunbq_n_s16(__a, __b,  __imm) __arm_vqshrunbq_n_s16(__a, __b,  __imm)
1573 #define vqshruntq_n_s16(__a, __b,  __imm) __arm_vqshruntq_n_s16(__a, __b,  __imm)
1574 #define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p)
1575 #define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p)
1576 #define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p)
1577 #define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p)
1578 #define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p)
1579 #define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p)
1580 #define vqrshrntq_n_u16(__a, __b,  __imm) __arm_vqrshrntq_n_u16(__a, __b,  __imm)
1581 #define vqshrnbq_n_u16(__a, __b,  __imm) __arm_vqshrnbq_n_u16(__a, __b,  __imm)
1582 #define vqshrntq_n_u16(__a, __b,  __imm) __arm_vqshrntq_n_u16(__a, __b,  __imm)
1583 #define vrshrnbq_n_u16(__a, __b,  __imm) __arm_vrshrnbq_n_u16(__a, __b,  __imm)
1584 #define vrshrntq_n_u16(__a, __b,  __imm) __arm_vrshrntq_n_u16(__a, __b,  __imm)
1585 #define vshrnbq_n_u16(__a, __b,  __imm) __arm_vshrnbq_n_u16(__a, __b,  __imm)
1586 #define vshrntq_n_u16(__a, __b,  __imm) __arm_vshrntq_n_u16(__a, __b,  __imm)
1587 #define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c)
1588 #define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p)
1589 #define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p)
1590 #define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p)
1591 #define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p)
1592 #define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p)
1593 #define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p)
1594 #define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p)
1595 #define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p)
1596 #define vmvnq_m_n_s32(__inactive,  __imm, __p) __arm_vmvnq_m_n_s32(__inactive,  __imm, __p)
1597 #define vorrq_m_n_s32(__a,  __imm, __p) __arm_vorrq_m_n_s32(__a,  __imm, __p)
1598 #define vqrshrntq_n_s32(__a, __b,  __imm) __arm_vqrshrntq_n_s32(__a, __b,  __imm)
1599 #define vqshrnbq_n_s32(__a, __b,  __imm) __arm_vqshrnbq_n_s32(__a, __b,  __imm)
1600 #define vqshrntq_n_s32(__a, __b,  __imm) __arm_vqshrntq_n_s32(__a, __b,  __imm)
1601 #define vrshrnbq_n_s32(__a, __b,  __imm) __arm_vrshrnbq_n_s32(__a, __b,  __imm)
1602 #define vrshrntq_n_s32(__a, __b,  __imm) __arm_vrshrntq_n_s32(__a, __b,  __imm)
1603 #define vshrnbq_n_s32(__a, __b,  __imm) __arm_vshrnbq_n_s32(__a, __b,  __imm)
1604 #define vshrntq_n_s32(__a, __b,  __imm) __arm_vshrntq_n_s32(__a, __b,  __imm)
1605 #define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c)
1606 #define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c)
1607 #define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c)
1608 #define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c)
1609 #define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c)
1610 #define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c)
1611 #define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c)
1612 #define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c)
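/* Illustrative sketch only: vcmlaq_f32 and its rotated forms treat each
   even/odd lane pair as one complex value (real, imaginary); combining the
   0- and 90-degree rotations gives a full complex multiply-accumulate.
   Hypothetical helper; rotation semantics assumed per ACLE.
     float32x4_t __cmac (float32x4_t acc, float32x4_t a, float32x4_t b)
     {
       acc = vcmlaq_f32 (acc, a, b);          // contribution from the real parts of a
       return vcmlaq_rot90_f32 (acc, a, b);   // contribution from the imaginary parts of a
     }  */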
1613 #define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c)
1614 #define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c)
1615 #define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c)
1616 #define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c)
1617 #define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p)
1618 #define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p)
1619 #define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p)
1620 #define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p)
1621 #define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p)
1622 #define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p)
1623 #define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p)
1624 #define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p)
1625 #define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p)
1626 #define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p)
1627 #define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p)
1628 #define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p)
1629 #define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p)
1630 #define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p)
1631 #define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p)
1632 #define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p)
1633 #define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p)
1634 #define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p)
1635 #define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p)
1636 #define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p)
1637 #define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p)
1638 #define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p)
1639 #define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p)
1640 #define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p)
1641 #define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p)
1642 #define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p)
1643 #define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p)
1644 #define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p)
1645 #define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p)
1646 #define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p)
1647 #define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p)
1648 #define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p)
1649 #define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p)
1650 #define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p)
1651 #define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p)
1652 #define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p)
1653 #define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p)
1654 #define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p)
1655 #define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p)
1656 #define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p)
1657 #define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p)
1658 #define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p)
1659 #define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p)
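/* Editor's note, not part of the original header: the predicated
   compares above (vcmpeqq_m, vcmpgtq_m, ...) return an mve_pred16_t in
   which lanes that were inactive in the input predicate read as false,
   and the vcvtmq/vcvtnq/vcvtpq/vcvtq _m conversions select the float to
   integer rounding mode by suffix (toward minus infinity, to nearest,
   toward plus infinity, and toward zero respectively).  */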
1660 #define vmvnq_m_n_u32(__inactive,  __imm, __p) __arm_vmvnq_m_n_u32(__inactive,  __imm, __p)
1661 #define vorrq_m_n_u32(__a,  __imm, __p) __arm_vorrq_m_n_u32(__a,  __imm, __p)
1662 #define vqrshruntq_n_s32(__a, __b,  __imm) __arm_vqrshruntq_n_s32(__a, __b,  __imm)
1663 #define vqshrunbq_n_s32(__a, __b,  __imm) __arm_vqshrunbq_n_s32(__a, __b,  __imm)
1664 #define vqshruntq_n_s32(__a, __b,  __imm) __arm_vqshruntq_n_s32(__a, __b,  __imm)
1665 #define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p)
1666 #define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p)
1667 #define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p)
1668 #define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p)
1669 #define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p)
1670 #define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p)
1671 #define vqrshrntq_n_u32(__a, __b,  __imm) __arm_vqrshrntq_n_u32(__a, __b,  __imm)
1672 #define vqshrnbq_n_u32(__a, __b,  __imm) __arm_vqshrnbq_n_u32(__a, __b,  __imm)
1673 #define vqshrntq_n_u32(__a, __b,  __imm) __arm_vqshrntq_n_u32(__a, __b,  __imm)
1674 #define vrshrnbq_n_u32(__a, __b,  __imm) __arm_vrshrnbq_n_u32(__a, __b,  __imm)
1675 #define vrshrntq_n_u32(__a, __b,  __imm) __arm_vrshrntq_n_u32(__a, __b,  __imm)
1676 #define vshrnbq_n_u32(__a, __b,  __imm) __arm_vshrnbq_n_u32(__a, __b,  __imm)
1677 #define vshrntq_n_u32(__a, __b,  __imm) __arm_vshrntq_n_u32(__a, __b,  __imm)
1678 #define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c)
1679 #define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p)
1680 #define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p)
1681 #define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p)
1682 #define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p)
1683 #define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p)
1684 #define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p)
1685 #define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
1686 #define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
1687 #define vsriq_m_n_s8(__a, __b,  __imm, __p) __arm_vsriq_m_n_s8(__a, __b,  __imm, __p)
1688 #define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
1689 #define vcvtq_m_n_f16_u16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a,  __imm6, __p)
1690 #define vqshluq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a,  __imm, __p)
1691 #define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
1692 #define vsriq_m_n_u8(__a, __b,  __imm, __p) __arm_vsriq_m_n_u8(__a, __b,  __imm, __p)
1693 #define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
1694 #define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
1695 #define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
1696 #define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
1697 #define vcvtq_m_n_f16_s16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a,  __imm6, __p)
1698 #define vsriq_m_n_s16(__a, __b,  __imm, __p) __arm_vsriq_m_n_s16(__a, __b,  __imm, __p)
1699 #define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
1700 #define vcvtq_m_n_f32_u32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a,  __imm6, __p)
1701 #define vqshluq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a,  __imm, __p)
1702 #define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
1703 #define vsriq_m_n_u16(__a, __b,  __imm, __p) __arm_vsriq_m_n_u16(__a, __b,  __imm, __p)
1704 #define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
1705 #define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
1706 #define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
1707 #define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
1708 #define vcvtq_m_n_f32_s32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a,  __imm6, __p)
1709 #define vsriq_m_n_s32(__a, __b,  __imm, __p) __arm_vsriq_m_n_s32(__a, __b,  __imm, __p)
1710 #define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
1711 #define vqshluq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a,  __imm, __p)
1712 #define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
1713 #define vsriq_m_n_u32(__a, __b,  __imm, __p) __arm_vsriq_m_n_u32(__a, __b,  __imm, __p)
1714 #define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
1715 #define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
1716 #define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
1717 #define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
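/* Editor's note, not part of the original header: the _m ("merging")
   intrinsics below take an extra __inactive vector and a predicate __p;
   lanes whose predicate bits are set compute the operation, and the
   remaining lanes are copied from __inactive.  A minimal sketch,
   assuming hypothetical variable names:

     int8x16_t fallback, a, b;
     mve_pred16_t p = vctp8q (n);             // predicate covering the first n lanes
     int8x16_t r = vaddq_m_s8 (fallback, a, b, p);
     // active lanes hold a + b, inactive lanes hold fallback
*/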
1718 #define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
1719 #define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
1720 #define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
1721 #define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
1722 #define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
1723 #define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
1724 #define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
1725 #define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
1726 #define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
1727 #define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
1728 #define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
1729 #define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
1730 #define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
1731 #define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
1732 #define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
1733 #define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
1734 #define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
1735 #define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
1736 #define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
1737 #define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
1738 #define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
1739 #define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
1740 #define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
1741 #define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
1742 #define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
1743 #define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
1744 #define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
1745 #define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
1746 #define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
1747 #define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
1748 #define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
1749 #define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
1750 #define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
1751 #define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
1752 #define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
1753 #define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
1754 #define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
1755 #define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
1756 #define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
1757 #define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
1758 #define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
1759 #define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
1760 #define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
1761 #define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
1762 #define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
1763 #define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
1764 #define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
1765 #define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
1766 #define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
1767 #define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
1768 #define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
1769 #define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
1770 #define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
1771 #define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
1772 #define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
1773 #define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
1774 #define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
1775 #define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
1776 #define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
1777 #define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
1778 #define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
1779 #define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
1780 #define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
1781 #define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
1782 #define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
1783 #define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
1784 #define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
1785 #define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
1786 #define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
1787 #define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
1788 #define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
1789 #define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
1790 #define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
1791 #define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
1792 #define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
1793 #define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
1794 #define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
1795 #define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
1796 #define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
1797 #define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
1798 #define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
1799 #define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
1800 #define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
1801 #define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
1802 #define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
1803 #define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
1804 #define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
1805 #define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
1806 #define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
1807 #define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
1808 #define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
1809 #define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
1810 #define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
1811 #define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
1812 #define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
1813 #define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
1814 #define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
1815 #define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
1816 #define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
1817 #define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
1818 #define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
1819 #define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
1820 #define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
1821 #define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
1822 #define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
1823 #define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
1824 #define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
1825 #define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
1826 #define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
1827 #define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
1828 #define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
1829 #define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
1830 #define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
1831 #define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
1832 #define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
1833 #define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
1834 #define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
1835 #define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
1836 #define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
1837 #define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
1838 #define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
1839 #define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
1840 #define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
1841 #define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
1842 #define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
1843 #define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
1844 #define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
1845 #define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
1846 #define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
1847 #define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
1848 #define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
1849 #define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
1850 #define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
1851 #define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
1852 #define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
1853 #define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
1854 #define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
1855 #define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
1856 #define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
1857 #define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
1858 #define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
1859 #define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
1860 #define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
1861 #define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
1862 #define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
1863 #define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
1864 #define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
1865 #define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
1866 #define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
1867 #define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
1868 #define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
1869 #define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
1870 #define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
1871 #define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
1872 #define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
1873 #define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
1874 #define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
1875 #define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
1876 #define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
1877 #define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
1878 #define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
1879 #define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
1880 #define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
1881 #define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
1882 #define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
1883 #define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
1884 #define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
1885 #define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
1886 #define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
1887 #define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
1888 #define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
1889 #define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
1890 #define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
1891 #define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
1892 #define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
1893 #define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
1894 #define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
1895 #define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
1896 #define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
1897 #define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
1898 #define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
1899 #define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
1900 #define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
1901 #define vqdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s8(__a, __b, __c, __p)
1902 #define vqdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s32(__a, __b, __c, __p)
1903 #define vqdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s16(__a, __b, __c, __p)
1904 #define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
1905 #define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
1906 #define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
1907 #define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
1908 #define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
1909 #define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
1910 #define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
1911 #define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
1912 #define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
1913 #define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
1914 #define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
1915 #define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
1916 #define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
1917 #define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
1918 #define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
1919 #define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
1920 #define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
1921 #define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
1922 #define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
1923 #define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
1924 #define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
1925 #define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
1926 #define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
1927 #define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
1928 #define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
1929 #define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
1930 #define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
1931 #define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
1932 #define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
1933 #define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
1934 #define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
1935 #define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
1936 #define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
1937 #define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
1938 #define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
1939 #define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
1940 #define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
1941 #define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
1942 #define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
1943 #define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
1944 #define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
1945 #define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
1946 #define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
1947 #define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
1948 #define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
1949 #define vqshlq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a,  __imm, __p)
1950 #define vqshlq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a,  __imm, __p)
1951 #define vqshlq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a,  __imm, __p)
1952 #define vqshlq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a,  __imm, __p)
1953 #define vqshlq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a,  __imm, __p)
1954 #define vqshlq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a,  __imm, __p)
1955 #define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
1956 #define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
1957 #define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
1958 #define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
1959 #define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
1960 #define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
1961 #define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
1962 #define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
1963 #define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
1964 #define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
1965 #define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
1966 #define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
1967 #define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
1968 #define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
1969 #define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
1970 #define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
1971 #define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
1972 #define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
1973 #define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
1974 #define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
1975 #define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
1976 #define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
1977 #define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
1978 #define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
1979 #define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
1980 #define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
1981 #define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
1982 #define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
1983 #define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
1984 #define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
1985 #define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
1986 #define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
1987 #define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
1988 #define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
1989 #define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
1990 #define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
1991 #define vrshrq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a,  __imm, __p)
1992 #define vrshrq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a,  __imm, __p)
1993 #define vrshrq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a,  __imm, __p)
1994 #define vrshrq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a,  __imm, __p)
1995 #define vrshrq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a,  __imm, __p)
1996 #define vrshrq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a,  __imm, __p)
1997 #define vshlq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a,  __imm, __p)
1998 #define vshlq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a,  __imm, __p)
1999 #define vshlq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a,  __imm, __p)
2000 #define vshlq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a,  __imm, __p)
2001 #define vshlq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a,  __imm, __p)
2002 #define vshlq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a,  __imm, __p)
2003 #define vshrq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a,  __imm, __p)
2004 #define vshrq_m_n_s32(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a,  __imm, __p)
2005 #define vshrq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a,  __imm, __p)
2006 #define vshrq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a,  __imm, __p)
2007 #define vshrq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a,  __imm, __p)
2008 #define vshrq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a,  __imm, __p)
2009 #define vsliq_m_n_s8(__a, __b,  __imm, __p) __arm_vsliq_m_n_s8(__a, __b,  __imm, __p)
2010 #define vsliq_m_n_s32(__a, __b,  __imm, __p) __arm_vsliq_m_n_s32(__a, __b,  __imm, __p)
2011 #define vsliq_m_n_s16(__a, __b,  __imm, __p) __arm_vsliq_m_n_s16(__a, __b,  __imm, __p)
2012 #define vsliq_m_n_u8(__a, __b,  __imm, __p) __arm_vsliq_m_n_u8(__a, __b,  __imm, __p)
2013 #define vsliq_m_n_u32(__a, __b,  __imm, __p) __arm_vsliq_m_n_u32(__a, __b,  __imm, __p)
2014 #define vsliq_m_n_u16(__a, __b,  __imm, __p) __arm_vsliq_m_n_u16(__a, __b,  __imm, __p)
2015 #define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
2016 #define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
2017 #define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
2018 #define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
2019 #define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
2020 #define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
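/* Editor's note, not part of the original header: the _p suffix on the
   accumulating reductions below (vmlaldavaq_p and friends) marks the
   predicated form; lanes whose predicate bits are clear do not
   contribute to the scalar accumulator.  A minimal sketch, assuming
   hypothetical variable names:

     int32x4_t a, b;
     mve_pred16_t p = vctp32q (n);
     int64_t sum = vmlaldavaq_p_s32 (0, a, b, p);   // sum of a[i] * b[i] over active lanes
*/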
2021 #define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
2022 #define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
2023 #define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
2024 #define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
2025 #define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
2026 #define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
2027 #define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
2028 #define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
2029 #define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
2030 #define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
2031 #define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
2032 #define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
2033 #define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
2034 #define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
2035 #define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
2036 #define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
2037 #define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
2038 #define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
2039 #define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
2040 #define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
2041 #define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
2042 #define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
2043 #define vqrshrnbq_m_n_s32(__a, __b,  __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b,  __imm, __p)
2044 #define vqrshrnbq_m_n_s16(__a, __b,  __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b,  __imm, __p)
2045 #define vqrshrnbq_m_n_u32(__a, __b,  __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b,  __imm, __p)
2046 #define vqrshrnbq_m_n_u16(__a, __b,  __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b,  __imm, __p)
2047 #define vqrshrntq_m_n_s32(__a, __b,  __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b,  __imm, __p)
2048 #define vqrshrntq_m_n_s16(__a, __b,  __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b,  __imm, __p)
2049 #define vqrshrntq_m_n_u32(__a, __b,  __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b,  __imm, __p)
2050 #define vqrshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b,  __imm, __p)
2051 #define vqrshrunbq_m_n_s32(__a, __b,  __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b,  __imm, __p)
2052 #define vqrshrunbq_m_n_s16(__a, __b,  __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b,  __imm, __p)
2053 #define vqrshruntq_m_n_s32(__a, __b,  __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b,  __imm, __p)
2054 #define vqrshruntq_m_n_s16(__a, __b,  __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b,  __imm, __p)
2055 #define vqshrnbq_m_n_s32(__a, __b,  __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b,  __imm, __p)
2056 #define vqshrnbq_m_n_s16(__a, __b,  __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b,  __imm, __p)
2057 #define vqshrnbq_m_n_u32(__a, __b,  __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b,  __imm, __p)
2058 #define vqshrnbq_m_n_u16(__a, __b,  __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b,  __imm, __p)
2059 #define vqshrntq_m_n_s32(__a, __b,  __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b,  __imm, __p)
2060 #define vqshrntq_m_n_s16(__a, __b,  __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b,  __imm, __p)
2061 #define vqshrntq_m_n_u32(__a, __b,  __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b,  __imm, __p)
2062 #define vqshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b,  __imm, __p)
2063 #define vqshrunbq_m_n_s32(__a, __b,  __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b,  __imm, __p)
2064 #define vqshrunbq_m_n_s16(__a, __b,  __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b,  __imm, __p)
2065 #define vqshruntq_m_n_s32(__a, __b,  __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b,  __imm, __p)
2066 #define vqshruntq_m_n_s16(__a, __b,  __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b,  __imm, __p)
2067 #define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p)
2068 #define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p)
2069 #define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p)
2070 #define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p)
2071 #define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p)
2072 #define vrshrnbq_m_n_s32(__a, __b,  __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b,  __imm, __p)
2073 #define vrshrnbq_m_n_s16(__a, __b,  __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b,  __imm, __p)
2074 #define vrshrnbq_m_n_u32(__a, __b,  __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b,  __imm, __p)
2075 #define vrshrnbq_m_n_u16(__a, __b,  __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b,  __imm, __p)
2076 #define vrshrntq_m_n_s32(__a, __b,  __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b,  __imm, __p)
2077 #define vrshrntq_m_n_s16(__a, __b,  __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b,  __imm, __p)
2078 #define vrshrntq_m_n_u32(__a, __b,  __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b,  __imm, __p)
2079 #define vrshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b,  __imm, __p)
2080 #define vshllbq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a,  __imm, __p)
2081 #define vshllbq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a,  __imm, __p)
2082 #define vshllbq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a,  __imm, __p)
2083 #define vshllbq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a,  __imm, __p)
2084 #define vshlltq_m_n_s8(__inactive, __a,  __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a,  __imm, __p)
2085 #define vshlltq_m_n_s16(__inactive, __a,  __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a,  __imm, __p)
2086 #define vshlltq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a,  __imm, __p)
2087 #define vshlltq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a,  __imm, __p)
2088 #define vshrnbq_m_n_s32(__a, __b,  __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b,  __imm, __p)
2089 #define vshrnbq_m_n_s16(__a, __b,  __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b,  __imm, __p)
2090 #define vshrnbq_m_n_u32(__a, __b,  __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b,  __imm, __p)
2091 #define vshrnbq_m_n_u16(__a, __b,  __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b,  __imm, __p)
2092 #define vshrntq_m_n_s32(__a, __b,  __imm, __p) __arm_vshrntq_m_n_s32(__a, __b,  __imm, __p)
2093 #define vshrntq_m_n_s16(__a, __b,  __imm, __p) __arm_vshrntq_m_n_s16(__a, __b,  __imm, __p)
2094 #define vshrntq_m_n_u32(__a, __b,  __imm, __p) __arm_vshrntq_m_n_u32(__a, __b,  __imm, __p)
2095 #define vshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vshrntq_m_n_u16(__a, __b,  __imm, __p)
2096 #define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
2097 #define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
2098 #define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
2099 #define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
2100 #define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
2101 #define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
2102 #define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
2103 #define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
2104 #define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
2105 #define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
2106 #define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
2107 #define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
2108 #define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
2109 #define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
2110 #define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
2111 #define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
2112 #define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
2113 #define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
2114 #define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
2115 #define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
2116 #define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
2117 #define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
2118 #define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
2119 #define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
2120 #define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
2121 #define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
2122 #define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
2123 #define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
2124 #define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
2125 #define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
2126 #define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
2127 #define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
2128 #define vcvtq_m_n_s32_f32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a,  __imm6, __p)
2129 #define vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p)
2130 #define vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p)
2131 #define vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p)
2132 #define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
2133 #define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
2134 #define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
2135 #define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
2136 #define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
2137 #define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
2138 #define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
2139 #define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
2140 #define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
2141 #define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
2142 #define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
2143 #define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
2144 #define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
2145 #define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
2146 #define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
2147 #define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
2148 #define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
2149 #define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
2150 #define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
2151 #define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
2152 #define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
2153 #define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
2154 #define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
2155 #define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
2156 #define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
2157 #define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
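/* Editor's note, not part of the original header: the vstrbq/vldrbq
   families below store or load one byte per lane, narrowing on store and
   zero- or sign-extending on load; the scatter_offset/gather_offset
   forms add a per-lane offset vector to the base pointer, and the _p/_z
   suffixes are the predicated forms (a _z load zeroes the inactive
   lanes).  A minimal sketch, assuming hypothetical variable names:

     uint8_t buf[8];
     uint16x8_t v;
     vstrbq_u16 (buf, v);                     // store the low byte of each lane
     uint16x8_t w = vldrbq_u16 (buf);         // zero-extend bytes back to 16-bit lanes
*/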
2158 #define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
2159 #define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
2160 #define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
2161 #define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
2162 #define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
2163 #define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
2164 #define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
2165 #define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
2166 #define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
2167 #define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
2168 #define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
2169 #define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
2170 #define vstrwq_scatter_base_s32(__addr,  __offset, __value) __arm_vstrwq_scatter_base_s32(__addr,  __offset, __value)
2171 #define vstrwq_scatter_base_u32(__addr,  __offset, __value) __arm_vstrwq_scatter_base_u32(__addr,  __offset, __value)
2172 #define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
2173 #define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
2174 #define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
2175 #define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
2176 #define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
2177 #define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
2178 #define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
2179 #define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
2180 #define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
2181 #define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
2182 #define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
2183 #define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
2184 #define vldrwq_gather_base_s32(__addr,  __offset) __arm_vldrwq_gather_base_s32(__addr,  __offset)
2185 #define vldrwq_gather_base_u32(__addr,  __offset) __arm_vldrwq_gather_base_u32(__addr,  __offset)
2186 #define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
2187 #define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
2188 #define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
2189 #define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
2190 #define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
2191 #define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
2192 #define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
2193 #define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
2194 #define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
2195 #define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
2196 #define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
2197 #define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
2198 #define vstrwq_scatter_base_p_s32(__addr,  __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr,  __offset, __value, __p)
2199 #define vstrwq_scatter_base_p_u32(__addr,  __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr,  __offset, __value, __p)
2200 #define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
2201 #define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
2202 #define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
2203 #define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
2204 #define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
2205 #define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
2206 #define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
2207 #define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
2208 #define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
2209 #define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
2210 #define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
2211 #define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
2212 #define vldrwq_gather_base_z_u32(__addr,  __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr,  __offset, __p)
2213 #define vldrwq_gather_base_z_s32(__addr,  __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr,  __offset, __p)
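/* Illustrative sketch, not part of the GCC header: the "_z" loads above
   zero their inactive lanes and the "_p" stores leave inactive lanes
   untouched, so a tail-predicated loop needs no scalar epilogue.  The
   hypothetical copy_bytes() below assumes an MVE-enabled target (e.g.
   -march=armv8.1-m.main+mve) and the vctp8q() intrinsic also provided by
   arm_mve.h.

     void copy_bytes (int8_t *dst, const int8_t *src, uint32_t n)
     {
       while (n)
         {
           mve_pred16_t p = vctp8q (n);          // predicate for the remaining lanes
           int8x16_t v = vldrbq_z_s8 (src, p);   // inactive lanes are loaded as zero
           vstrbq_p_s8 (dst, v, p);              // inactive lanes are not written
           src += 16;
           dst += 16;
           n = (n > 16) ? n - 16 : 0;
         }
     }
*/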
2214 #define vld1q_s8(__base) __arm_vld1q_s8(__base)
2215 #define vld1q_s32(__base) __arm_vld1q_s32(__base)
2216 #define vld1q_s16(__base) __arm_vld1q_s16(__base)
2217 #define vld1q_u8(__base) __arm_vld1q_u8(__base)
2218 #define vld1q_u32(__base) __arm_vld1q_u32(__base)
2219 #define vld1q_u16(__base) __arm_vld1q_u16(__base)
2220 #define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
2221 #define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
2222 #define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
2223 #define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
2224 #define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
2225 #define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
2226 #define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
2227 #define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
2228 #define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
2229 #define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
2230 #define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
2231 #define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
2232 #define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
2233 #define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
2234 #define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
2235 #define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
2236 #define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
2237 #define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
2238 #define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
2239 #define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
2240 #define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
2241 #define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
2242 #define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
2243 #define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
2244 #define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
2245 #define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
2246 #define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
2247 #define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
2248 #define vld1q_f32(__base) __arm_vld1q_f32(__base)
2249 #define vld1q_f16(__base) __arm_vld1q_f16(__base)
2250 #define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
2251 #define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
2252 #define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
2253 #define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
2254 #define vldrdq_gather_base_s64(__addr,  __offset) __arm_vldrdq_gather_base_s64(__addr,  __offset)
2255 #define vldrdq_gather_base_u64(__addr,  __offset) __arm_vldrdq_gather_base_u64(__addr,  __offset)
2256 #define vldrdq_gather_base_z_s64(__addr,  __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr,  __offset, __p)
2257 #define vldrdq_gather_base_z_u64(__addr,  __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr,  __offset, __p)
2258 #define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
2259 #define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
2260 #define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
2261 #define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
2262 #define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
2263 #define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
2264 #define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
2265 #define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
2266 #define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
2267 #define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
2268 #define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
2269 #define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
2270 #define vldrwq_gather_base_f32(__addr,  __offset) __arm_vldrwq_gather_base_f32(__addr,  __offset)
2271 #define vldrwq_gather_base_z_f32(__addr,  __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr,  __offset, __p)
2272 #define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
2273 #define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
2274 #define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
2275 #define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
2276 #define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
2277 #define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
2278 #define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
2279 #define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
2280 #define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
2281 #define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
2282 #define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
2283 #define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
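/* Illustrative sketch, not part of the GCC header: the "_gather_offset"
   forms above take per-lane byte offsets, while the
   "_gather_shifted_offset" forms scale each offset by the element size,
   so a vector of element indices can be used directly.  The hypothetical
   gather_by_index() below assumes an MVE-enabled target:

     uint32x4_t gather_by_index (const uint32_t *table, uint32x4_t idx)
     {
       // loads table[idx[0]], table[idx[1]], table[idx[2]], table[idx[3]]
       return vldrwq_gather_shifted_offset_u32 (table, idx);
     }
*/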
2284 #define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
2285 #define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
2286 #define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
2287 #define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
2288 #define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
2289 #define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
2290 #define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
2291 #define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
2292 #define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
2293 #define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
2294 #define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
2295 #define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
2296 #define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
2297 #define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
2298 #define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
2299 #define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
2300 #define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
2301 #define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
2302 #define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
2303 #define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
2304 #define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
2305 #define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
2306 #define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
2307 #define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
2308 #define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
2309 #define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
2310 #define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
2311 #define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
2312 #define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
2313 #define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
2314 #define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
2315 #define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
2316 #define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
2317 #define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
2318 #define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
2319 #define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
2320 #define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
2321 #define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
2322 #define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
2323 #define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
2324 #define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
2325 #define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
2326 #define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
2327 #define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
2328 #define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
2329 #define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
2330 #define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
2331 #define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
2332 #define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
2333 #define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
2334 #define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
2335 #define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
2336 #define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
2337 #define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
2338 #define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
2339 #define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
2340 #define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
2341 #define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
2342 #define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
2343 #define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
2344 #define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
2345 #define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
2346 #define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
2347 #define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
2348 #define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
2349 #define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p)
2350 #define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p)
2351 #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
2352 #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
2353 #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
2354 #define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
2355 #define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
2356 #define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
2357 #define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
2358 #define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
2359 #define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
2360 #define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
2361 #define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
2362 #define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
2363 #define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
2364 #define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
2365 #define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
2366 #define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
2367 #define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
2368 #define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
2369 #define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
2370 #define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
2371 #define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
2372 #define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
2373 #define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
2374 #define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
2375 #define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
2376 #define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
2377 #define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
2378 #define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
2379 #define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
2380 #define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
2381 #define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
2382 #define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
2383 #define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
2384 #define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
2385 #define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
2386 #define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
2387 #define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
2388 #define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
2389 #define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
2390 #define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
2391 #define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
2392 #define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
2393 #define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
2394 #define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
2395 #define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
2396 #define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
2397 #define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
2398 #define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
2399 #define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
2400 #define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
2401 #define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
2402 #define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
2403 #define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
2404 #define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
2405 #define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
2406 #define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
2407 #define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
2408 #define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
2409 #define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
2410 #define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
2411 #define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
2412 #define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
2413 #define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
2414 #define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
2415 #define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
2416 #define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
2417 #define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
2418 #define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
2419 #define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
2420 #define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
2421 #define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
2422 #define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
2423 #define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
2424 #define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
2425 #define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
2426 #define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
2427 #define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
2428 #define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
2429 #define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
2430 #define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
2431 #define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
2432 #define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
2433 #define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
2434 #define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
2435 #define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
2436 #define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
2437 #define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
2438 #define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
2439 #define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
2440 #define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
2441 #define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
2442 #define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
2443 #define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
2444 #define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
2445 #define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
2446 #define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
2447 #define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
2448 #define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
2449 #define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
2450 #define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
2451 #define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
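/* Illustrative sketch, not part of the GCC header: the vreinterpretq_*
   macros above reinterpret the 128-bit register contents as another
   vector type without changing any bits.  For example, a hypothetical
   helper exposing the raw IEEE-754 bit patterns of a float vector:

     uint32x4_t float_bits (float32x4_t v)
     {
       return vreinterpretq_u32_f32 (v);   // type change only, no value conversion
     }
*/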
2452 #define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
2453 #define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
2454 #define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
2455 #define vuninitializedq_u64(void) __arm_vuninitializedq_u64(void)
2456 #define vuninitializedq_s8(void) __arm_vuninitializedq_s8(void)
2457 #define vuninitializedq_s16(void) __arm_vuninitializedq_s16(void)
2458 #define vuninitializedq_s32(void) __arm_vuninitializedq_s32(void)
2459 #define vuninitializedq_s64(void) __arm_vuninitializedq_s64(void)
2460 #define vuninitializedq_f16(void) __arm_vuninitializedq_f16(void)
2461 #define vuninitializedq_f32(void) __arm_vuninitializedq_f32(void)
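/* Note, not part of the GCC header: the vuninitializedq_* macros above
   return a vector with unspecified contents.  The ACLE describes the
   predicated "_x" intrinsics further down as equivalent to the matching
   "_m" forms called with such an uninitialized vector as the inactive
   argument, e.g. (illustrative only):

     // int32x4_t r = vaddq_m_s32 (vuninitializedq_s32 (), a, b, p);
*/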
2462 #define vddupq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a,  __imm, __p)
2463 #define vddupq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a,  __imm, __p)
2464 #define vddupq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a,  __imm, __p)
2465 #define vddupq_m_wb_u8(__inactive,  __a,  __imm, __p) __arm_vddupq_m_wb_u8(__inactive,  __a,  __imm, __p)
2466 #define vddupq_m_wb_u16(__inactive,  __a,  __imm, __p) __arm_vddupq_m_wb_u16(__inactive,  __a,  __imm, __p)
2467 #define vddupq_m_wb_u32(__inactive,  __a,  __imm, __p) __arm_vddupq_m_wb_u32(__inactive,  __a,  __imm, __p)
2468 #define vddupq_n_u8(__a,  __imm) __arm_vddupq_n_u8(__a,  __imm)
2469 #define vddupq_n_u32(__a,  __imm) __arm_vddupq_n_u32(__a,  __imm)
2470 #define vddupq_n_u16(__a,  __imm) __arm_vddupq_n_u16(__a,  __imm)
2471 #define vddupq_wb_u8( __a,  __imm) __arm_vddupq_wb_u8( __a,  __imm)
2472 #define vddupq_wb_u16( __a,  __imm) __arm_vddupq_wb_u16( __a,  __imm)
2473 #define vddupq_wb_u32( __a,  __imm) __arm_vddupq_wb_u32( __a,  __imm)
2474 #define vdwdupq_m_n_u8(__inactive, __a, __b,  __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b,  __imm, __p)
2475 #define vdwdupq_m_n_u32(__inactive, __a, __b,  __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b,  __imm, __p)
2476 #define vdwdupq_m_n_u16(__inactive, __a, __b,  __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b,  __imm, __p)
2477 #define vdwdupq_m_wb_u8(__inactive,  __a, __b,  __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive,  __a, __b,  __imm, __p)
2478 #define vdwdupq_m_wb_u32(__inactive,  __a, __b,  __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive,  __a, __b,  __imm, __p)
2479 #define vdwdupq_m_wb_u16(__inactive,  __a, __b,  __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive,  __a, __b,  __imm, __p)
2480 #define vdwdupq_n_u8(__a, __b,  __imm) __arm_vdwdupq_n_u8(__a, __b,  __imm)
2481 #define vdwdupq_n_u32(__a, __b,  __imm) __arm_vdwdupq_n_u32(__a, __b,  __imm)
2482 #define vdwdupq_n_u16(__a, __b,  __imm) __arm_vdwdupq_n_u16(__a, __b,  __imm)
2483 #define vdwdupq_wb_u8( __a, __b,  __imm) __arm_vdwdupq_wb_u8( __a, __b,  __imm)
2484 #define vdwdupq_wb_u32( __a, __b,  __imm) __arm_vdwdupq_wb_u32( __a, __b,  __imm)
2485 #define vdwdupq_wb_u16( __a, __b,  __imm) __arm_vdwdupq_wb_u16( __a, __b,  __imm)
2486 #define vidupq_m_n_u8(__inactive, __a,  __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a,  __imm, __p)
2487 #define vidupq_m_n_u32(__inactive, __a,  __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a,  __imm, __p)
2488 #define vidupq_m_n_u16(__inactive, __a,  __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a,  __imm, __p)
2489 #define vidupq_m_wb_u8(__inactive,  __a,  __imm, __p) __arm_vidupq_m_wb_u8(__inactive,  __a,  __imm, __p)
2490 #define vidupq_m_wb_u16(__inactive,  __a,  __imm, __p) __arm_vidupq_m_wb_u16(__inactive,  __a,  __imm, __p)
2491 #define vidupq_m_wb_u32(__inactive,  __a,  __imm, __p) __arm_vidupq_m_wb_u32(__inactive,  __a,  __imm, __p)
2492 #define vidupq_n_u8(__a,  __imm) __arm_vidupq_n_u8(__a,  __imm)
2493 #define vidupq_n_u32(__a,  __imm) __arm_vidupq_n_u32(__a,  __imm)
2494 #define vidupq_n_u16(__a,  __imm) __arm_vidupq_n_u16(__a,  __imm)
2495 #define vidupq_wb_u8( __a,  __imm) __arm_vidupq_wb_u8( __a,  __imm)
2496 #define vidupq_wb_u16( __a,  __imm) __arm_vidupq_wb_u16( __a,  __imm)
2497 #define vidupq_wb_u32( __a,  __imm) __arm_vidupq_wb_u32( __a,  __imm)
2498 #define viwdupq_m_n_u8(__inactive, __a, __b,  __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b,  __imm, __p)
2499 #define viwdupq_m_n_u32(__inactive, __a, __b,  __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b,  __imm, __p)
2500 #define viwdupq_m_n_u16(__inactive, __a, __b,  __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b,  __imm, __p)
2501 #define viwdupq_m_wb_u8(__inactive,  __a, __b,  __imm, __p) __arm_viwdupq_m_wb_u8(__inactive,  __a, __b,  __imm, __p)
2502 #define viwdupq_m_wb_u32(__inactive,  __a, __b,  __imm, __p) __arm_viwdupq_m_wb_u32(__inactive,  __a, __b,  __imm, __p)
2503 #define viwdupq_m_wb_u16(__inactive,  __a, __b,  __imm, __p) __arm_viwdupq_m_wb_u16(__inactive,  __a, __b,  __imm, __p)
2504 #define viwdupq_n_u8(__a, __b,  __imm) __arm_viwdupq_n_u8(__a, __b,  __imm)
2505 #define viwdupq_n_u32(__a, __b,  __imm) __arm_viwdupq_n_u32(__a, __b,  __imm)
2506 #define viwdupq_n_u16(__a, __b,  __imm) __arm_viwdupq_n_u16(__a, __b,  __imm)
2507 #define viwdupq_wb_u8( __a, __b,  __imm) __arm_viwdupq_wb_u8( __a, __b,  __imm)
2508 #define viwdupq_wb_u32( __a, __b,  __imm) __arm_viwdupq_wb_u32( __a, __b,  __imm)
2509 #define viwdupq_wb_u16( __a, __b,  __imm) __arm_viwdupq_wb_u16( __a, __b,  __imm)
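/* Illustrative sketch, not part of the GCC header: the vidupq/vddupq and
   viwdupq/vdwdupq families above build vectors of incrementing or
   decrementing values; the "_wb" forms also write the next start value
   back through a pointer, and the "wdup" forms wrap at a supplied limit.
   A hypothetical helper building a simple index vector, assuming an
   MVE-enabled target:

     uint32x4_t first_indices (void)
     {
       return vidupq_n_u32 (0, 1);   // { 0, 1, 2, 3 }; the step must be 1, 2, 4 or 8
     }
*/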
2510 #define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset)
2511 #define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset)
2512 #define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p)
2513 #define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p)
2514 #define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset)
2515 #define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset)
2516 #define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset)
2517 #define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p)
2518 #define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p)
2519 #define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p)
2520 #define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p)
2521 #define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p)
2522 #define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value)
2523 #define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value)
2524 #define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p)
2525 #define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p)
2526 #define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p)
2527 #define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
2528 #define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
2529 #define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
2530 #define vddupq_x_n_u8(__a,  __imm, __p) __arm_vddupq_x_n_u8(__a,  __imm, __p)
2531 #define vddupq_x_n_u16(__a,  __imm, __p) __arm_vddupq_x_n_u16(__a,  __imm, __p)
2532 #define vddupq_x_n_u32(__a,  __imm, __p) __arm_vddupq_x_n_u32(__a,  __imm, __p)
2533 #define vddupq_x_wb_u8(__a,  __imm, __p) __arm_vddupq_x_wb_u8(__a,  __imm, __p)
2534 #define vddupq_x_wb_u16(__a,  __imm, __p) __arm_vddupq_x_wb_u16(__a,  __imm, __p)
2535 #define vddupq_x_wb_u32(__a,  __imm, __p) __arm_vddupq_x_wb_u32(__a,  __imm, __p)
2536 #define vdwdupq_x_n_u8(__a, __b,  __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b,  __imm, __p)
2537 #define vdwdupq_x_n_u16(__a, __b,  __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b,  __imm, __p)
2538 #define vdwdupq_x_n_u32(__a, __b,  __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b,  __imm, __p)
2539 #define vdwdupq_x_wb_u8(__a, __b,  __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b,  __imm, __p)
2540 #define vdwdupq_x_wb_u16(__a, __b,  __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b,  __imm, __p)
2541 #define vdwdupq_x_wb_u32(__a, __b,  __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b,  __imm, __p)
2542 #define vidupq_x_n_u8(__a,  __imm, __p) __arm_vidupq_x_n_u8(__a,  __imm, __p)
2543 #define vidupq_x_n_u16(__a,  __imm, __p) __arm_vidupq_x_n_u16(__a,  __imm, __p)
2544 #define vidupq_x_n_u32(__a,  __imm, __p) __arm_vidupq_x_n_u32(__a,  __imm, __p)
2545 #define vidupq_x_wb_u8(__a,  __imm, __p) __arm_vidupq_x_wb_u8(__a,  __imm, __p)
2546 #define vidupq_x_wb_u16(__a,  __imm, __p) __arm_vidupq_x_wb_u16(__a,  __imm, __p)
2547 #define vidupq_x_wb_u32(__a,  __imm, __p) __arm_vidupq_x_wb_u32(__a,  __imm, __p)
2548 #define viwdupq_x_n_u8(__a, __b,  __imm, __p) __arm_viwdupq_x_n_u8(__a, __b,  __imm, __p)
2549 #define viwdupq_x_n_u16(__a, __b,  __imm, __p) __arm_viwdupq_x_n_u16(__a, __b,  __imm, __p)
2550 #define viwdupq_x_n_u32(__a, __b,  __imm, __p) __arm_viwdupq_x_n_u32(__a, __b,  __imm, __p)
2551 #define viwdupq_x_wb_u8(__a, __b,  __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b,  __imm, __p)
2552 #define viwdupq_x_wb_u16(__a, __b,  __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b,  __imm, __p)
2553 #define viwdupq_x_wb_u32(__a, __b,  __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b,  __imm, __p)
2554 #define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
2555 #define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
2556 #define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
2557 #define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
2558 #define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
2559 #define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
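/* Illustrative sketch, not part of the GCC header: the "_x" suffix used
   throughout the block below marks predicated intrinsics whose
   false-predicated lanes are left with unspecified values, which is fine
   whenever the result is consumed under the same predicate.  The
   hypothetical add_arrays() below combines this with tail predication,
   assuming an MVE-enabled target and the vctp32q() intrinsic also
   provided by arm_mve.h:

     void add_arrays (uint32_t *dst, const uint32_t *a, const uint32_t *b, uint32_t n)
     {
       while (n)
         {
           mve_pred16_t p = vctp32q (n);
           uint32x4_t va = vldrwq_z_u32 (a, p);
           uint32x4_t vb = vldrwq_z_u32 (b, p);
           vstrwq_p_u32 (dst, vaddq_x_u32 (va, vb, p), p);   // inactive lanes never stored
           a += 4;
           b += 4;
           dst += 4;
           n = (n > 4) ? n - 4 : 0;
         }
     }
*/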
2560 #define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
2561 #define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
2562 #define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
2563 #define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
2564 #define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
2565 #define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
2566 #define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
2567 #define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
2568 #define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
2569 #define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
2570 #define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
2571 #define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
2572 #define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
2573 #define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
2574 #define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
2575 #define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
2576 #define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
2577 #define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
2578 #define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
2579 #define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
2580 #define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
2581 #define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
2582 #define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
2583 #define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
2584 #define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
2585 #define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
2586 #define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
2587 #define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
2588 #define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
2589 #define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
2590 #define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
2591 #define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
2592 #define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
2593 #define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
2594 #define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
2595 #define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
2596 #define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
2597 #define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
2598 #define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
2599 #define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
2600 #define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
2601 #define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
2602 #define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
2603 #define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
2604 #define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
2605 #define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
2606 #define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
2607 #define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
2608 #define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
2609 #define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
2610 #define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
2611 #define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
2612 #define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
2613 #define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
2614 #define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
2615 #define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
2616 #define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
2617 #define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
2618 #define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
2619 #define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
2620 #define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
2621 #define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
2622 #define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
2623 #define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
2624 #define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
2625 #define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
2626 #define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
2627 #define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
2628 #define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
2629 #define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
2630 #define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
2631 #define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
2632 #define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
2633 #define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
2634 #define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
2635 #define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
2636 #define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
2637 #define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
2638 #define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
2639 #define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
2640 #define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
2641 #define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
2642 #define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
2643 #define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
2644 #define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
2645 #define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
2646 #define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
2647 #define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
2648 #define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
2649 #define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
2650 #define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
2651 #define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
2652 #define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
2653 #define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
2654 #define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p)
2655 #define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p)
2656 #define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p)
2657 #define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p)
2658 #define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p)
2659 #define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p)
2660 #define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p)
2661 #define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p)
2662 #define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p)
2663 #define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p)
2664 #define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p)
2665 #define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p)
2666 #define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p)
2667 #define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p)
2668 #define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p)
2669 #define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p)
2670 #define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p)
2671 #define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p)
2672 #define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p)
2673 #define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p)
2674 #define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p)
2675 #define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p)
2676 #define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p)
2677 #define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p)
2678 #define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p)
2679 #define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p)
2680 #define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p)
2681 #define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p)
2682 #define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p)
2683 #define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p)
2684 #define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p)
2685 #define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p)
2686 #define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p)
2687 #define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p)
2688 #define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p)
2689 #define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p)
2690 #define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p)
2691 #define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p)
2692 #define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p)
2693 #define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p)
2694 #define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p)
2695 #define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p)
2696 #define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p)
2697 #define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p)
2698 #define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p)
2699 #define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p)
2700 #define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p)
2701 #define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p)
2702 #define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
2703 #define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
2704 #define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
2705 #define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
2706 #define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
2707 #define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
2708 #define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
2709 #define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
2710 #define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
2711 #define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
2712 #define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
2713 #define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
2714 #define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p)
2715 #define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p)
2716 #define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p)
2717 #define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p)
2718 #define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p)
2719 #define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p)
2720 #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
2721 #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
2722 #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
2723 #define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
2724 #define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
2725 #define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
2726 #define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
2727 #define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
2728 #define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
2729 #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
2730 #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
2731 #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
2732 #define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p)
2733 #define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p)
2734 #define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p)
2735 #define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p)
2736 #define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p)
2737 #define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p)
2738 #define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p)
2739 #define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p)
2740 #define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p)
2741 #define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p)
2742 #define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p)
2743 #define vmvnq_x_n_s16( __imm, __p) __arm_vmvnq_x_n_s16( __imm, __p)
2744 #define vmvnq_x_n_s32( __imm, __p) __arm_vmvnq_x_n_s32( __imm, __p)
2745 #define vmvnq_x_n_u16( __imm, __p) __arm_vmvnq_x_n_u16( __imm, __p)
2746 #define vmvnq_x_n_u32( __imm, __p) __arm_vmvnq_x_n_u32( __imm, __p)
2747 #define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p)
2748 #define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p)
2749 #define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p)
2750 #define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p)
2751 #define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p)
2752 #define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p)
2753 #define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p)
2754 #define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p)
2755 #define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p)
2756 #define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p)
2757 #define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p)
2758 #define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p)
2759 #define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p)
2760 #define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p)
2761 #define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p)
2762 #define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p)
2763 #define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p)
2764 #define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p)
2765 #define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p)
2766 #define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p)
2767 #define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p)
2768 #define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
2769 #define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
2770 #define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
2771 #define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
2772 #define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
2773 #define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
2774 #define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
2775 #define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
2776 #define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
2777 #define vshllbq_x_n_s8(__a,  __imm, __p) __arm_vshllbq_x_n_s8(__a,  __imm, __p)
2778 #define vshllbq_x_n_s16(__a,  __imm, __p) __arm_vshllbq_x_n_s16(__a,  __imm, __p)
2779 #define vshllbq_x_n_u8(__a,  __imm, __p) __arm_vshllbq_x_n_u8(__a,  __imm, __p)
2780 #define vshllbq_x_n_u16(__a,  __imm, __p) __arm_vshllbq_x_n_u16(__a,  __imm, __p)
2781 #define vshlltq_x_n_s8(__a,  __imm, __p) __arm_vshlltq_x_n_s8(__a,  __imm, __p)
2782 #define vshlltq_x_n_s16(__a,  __imm, __p) __arm_vshlltq_x_n_s16(__a,  __imm, __p)
2783 #define vshlltq_x_n_u8(__a,  __imm, __p) __arm_vshlltq_x_n_u8(__a,  __imm, __p)
2784 #define vshlltq_x_n_u16(__a,  __imm, __p) __arm_vshlltq_x_n_u16(__a,  __imm, __p)
2785 #define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p)
2786 #define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p)
2787 #define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p)
2788 #define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p)
2789 #define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p)
2790 #define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p)
2791 #define vshlq_x_n_s8(__a,  __imm, __p) __arm_vshlq_x_n_s8(__a,  __imm, __p)
2792 #define vshlq_x_n_s16(__a,  __imm, __p) __arm_vshlq_x_n_s16(__a,  __imm, __p)
2793 #define vshlq_x_n_s32(__a,  __imm, __p) __arm_vshlq_x_n_s32(__a,  __imm, __p)
2794 #define vshlq_x_n_u8(__a,  __imm, __p) __arm_vshlq_x_n_u8(__a,  __imm, __p)
2795 #define vshlq_x_n_u16(__a,  __imm, __p) __arm_vshlq_x_n_u16(__a,  __imm, __p)
2796 #define vshlq_x_n_u32(__a,  __imm, __p) __arm_vshlq_x_n_u32(__a,  __imm, __p)
2797 #define vrshrq_x_n_s8(__a,  __imm, __p) __arm_vrshrq_x_n_s8(__a,  __imm, __p)
2798 #define vrshrq_x_n_s16(__a,  __imm, __p) __arm_vrshrq_x_n_s16(__a,  __imm, __p)
2799 #define vrshrq_x_n_s32(__a,  __imm, __p) __arm_vrshrq_x_n_s32(__a,  __imm, __p)
2800 #define vrshrq_x_n_u8(__a,  __imm, __p) __arm_vrshrq_x_n_u8(__a,  __imm, __p)
2801 #define vrshrq_x_n_u16(__a,  __imm, __p) __arm_vrshrq_x_n_u16(__a,  __imm, __p)
2802 #define vrshrq_x_n_u32(__a,  __imm, __p) __arm_vrshrq_x_n_u32(__a,  __imm, __p)
2803 #define vshrq_x_n_s8(__a,  __imm, __p) __arm_vshrq_x_n_s8(__a,  __imm, __p)
2804 #define vshrq_x_n_s16(__a,  __imm, __p) __arm_vshrq_x_n_s16(__a,  __imm, __p)
2805 #define vshrq_x_n_s32(__a,  __imm, __p) __arm_vshrq_x_n_s32(__a,  __imm, __p)
2806 #define vshrq_x_n_u8(__a,  __imm, __p) __arm_vshrq_x_n_u8(__a,  __imm, __p)
2807 #define vshrq_x_n_u16(__a,  __imm, __p) __arm_vshrq_x_n_u16(__a,  __imm, __p)
2808 #define vshrq_x_n_u32(__a,  __imm, __p) __arm_vshrq_x_n_u32(__a,  __imm, __p)
2809 #define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p)
2810 #define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p)
2811 #define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p)
2812 #define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p)
2813 #define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p)
2814 #define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p)
2815 #define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p)
2816 #define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p)
2817 #define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p)
2818 #define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p)
2819 #define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p)
2820 #define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p)
2821 #define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p)
2822 #define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p)
2823 #define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p)
2824 #define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p)
2825 #define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p)
2826 #define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p)
2827 #define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p)
2828 #define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p)
2829 #define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p)
2830 #define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p)
2831 #define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p)
2832 #define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p)
2833 #define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p)
2834 #define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p)
2835 #define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p)
2836 #define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p)
2837 #define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p)
2838 #define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p)
2839 #define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p)
2840 #define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p)
2841 #define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p)
2842 #define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p)
2843 #define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p)
2844 #define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p)
2845 #define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p)
2846 #define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p)
2847 #define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p)
2848 #define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p)
2849 #define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p)
2850 #define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p)
2851 #define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p)
2852 #define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p)
2853 #define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p)
2854 #define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p)
2855 #define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p)
2856 #define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p)
2857 #define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p)
2858 #define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p)
2859 #define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p)
2860 #define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p)
2861 #define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p)
2862 #define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p)
2863 #define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p)
2864 #define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p)
2865 #define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p)
2866 #define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p)
2867 #define vcvtq_x_n_f16_s16(__a,  __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a,  __imm6, __p)
2868 #define vcvtq_x_n_f16_u16(__a,  __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a,  __imm6, __p)
2869 #define vcvtq_x_n_f32_s32(__a,  __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a,  __imm6, __p)
2870 #define vcvtq_x_n_f32_u32(__a,  __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a,  __imm6, __p)
2871 #define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p)
2872 #define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p)
2873 #define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p)
2874 #define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p)
2875 #define vcvtq_x_n_s16_f16(__a,  __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a,  __imm6, __p)
2876 #define vcvtq_x_n_s32_f32(__a,  __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a,  __imm6, __p)
2877 #define vcvtq_x_n_u16_f16(__a,  __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a,  __imm6, __p)
2878 #define vcvtq_x_n_u32_f32(__a,  __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a,  __imm6, __p)
2879 #define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p)
2880 #define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p)
2881 #define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p)
2882 #define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p)
2883 #define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p)
2884 #define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p)
2885 #define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p)
2886 #define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p)
2887 #define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p)
2888 #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
2889 #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
2890 #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
2891 #define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
2892 #define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
2893 #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
2894 #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
2895 #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
2896 #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
2897 #define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
2898 #define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
2899 #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
2900 #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
2901 #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
2902 #define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p)
2903 #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p)
2904 #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p)
2905 #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p)
2906 #define vadciq_s32(__a, __b,  __carry_out) __arm_vadciq_s32(__a, __b,  __carry_out)
2907 #define vadciq_u32(__a, __b,  __carry_out) __arm_vadciq_u32(__a, __b,  __carry_out)
2908 #define vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p)
2909 #define vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p)
2910 #define vadcq_s32(__a, __b,  __carry) __arm_vadcq_s32(__a, __b,  __carry)
2911 #define vadcq_u32(__a, __b,  __carry) __arm_vadcq_u32(__a, __b,  __carry)
2912 #define vadcq_m_s32(__inactive, __a, __b,  __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b,  __carry, __p)
2913 #define vadcq_m_u32(__inactive, __a, __b,  __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b,  __carry, __p)
2914 #define vsbciq_s32(__a, __b,  __carry_out) __arm_vsbciq_s32(__a, __b,  __carry_out)
2915 #define vsbciq_u32(__a, __b,  __carry_out) __arm_vsbciq_u32(__a, __b,  __carry_out)
2916 #define vsbciq_m_s32(__inactive, __a, __b,  __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b,  __carry_out, __p)
2917 #define vsbciq_m_u32(__inactive, __a, __b,  __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b,  __carry_out, __p)
2918 #define vsbcq_s32(__a, __b,  __carry) __arm_vsbcq_s32(__a, __b,  __carry)
2919 #define vsbcq_u32(__a, __b,  __carry) __arm_vsbcq_u32(__a, __b,  __carry)
2920 #define vsbcq_m_s32(__inactive, __a, __b,  __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b,  __carry, __p)
2921 #define vsbcq_m_u32(__inactive, __a, __b,  __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b,  __carry, __p)
2922 #define vst1q_p_u8(__addr, __value, __p) __arm_vst1q_p_u8(__addr, __value, __p)
2923 #define vst1q_p_s8(__addr, __value, __p) __arm_vst1q_p_s8(__addr, __value, __p)
2924 #define vst2q_s8(__addr, __value) __arm_vst2q_s8(__addr, __value)
2925 #define vst2q_u8(__addr, __value) __arm_vst2q_u8(__addr, __value)
2926 #define vld1q_z_u8(__base, __p) __arm_vld1q_z_u8(__base, __p)
2927 #define vld1q_z_s8(__base, __p) __arm_vld1q_z_s8(__base, __p)
2928 #define vld2q_s8(__addr) __arm_vld2q_s8(__addr)
2929 #define vld2q_u8(__addr) __arm_vld2q_u8(__addr)
2930 #define vld4q_s8(__addr) __arm_vld4q_s8(__addr)
2931 #define vld4q_u8(__addr) __arm_vld4q_u8(__addr)
2932 #define vst1q_p_u16(__addr, __value, __p) __arm_vst1q_p_u16(__addr, __value, __p)
2933 #define vst1q_p_s16(__addr, __value, __p) __arm_vst1q_p_s16(__addr, __value, __p)
2934 #define vst2q_s16(__addr, __value) __arm_vst2q_s16(__addr, __value)
2935 #define vst2q_u16(__addr, __value) __arm_vst2q_u16(__addr, __value)
2936 #define vld1q_z_u16(__base, __p) __arm_vld1q_z_u16(__base, __p)
2937 #define vld1q_z_s16(__base, __p) __arm_vld1q_z_s16(__base, __p)
2938 #define vld2q_s16(__addr) __arm_vld2q_s16(__addr)
2939 #define vld2q_u16(__addr) __arm_vld2q_u16(__addr)
2940 #define vld4q_s16(__addr) __arm_vld4q_s16(__addr)
2941 #define vld4q_u16(__addr) __arm_vld4q_u16(__addr)
2942 #define vst1q_p_u32(__addr, __value, __p) __arm_vst1q_p_u32(__addr, __value, __p)
2943 #define vst1q_p_s32(__addr, __value, __p) __arm_vst1q_p_s32(__addr, __value, __p)
2944 #define vst2q_s32(__addr, __value) __arm_vst2q_s32(__addr, __value)
2945 #define vst2q_u32(__addr, __value) __arm_vst2q_u32(__addr, __value)
2946 #define vld1q_z_u32(__base, __p) __arm_vld1q_z_u32(__base, __p)
2947 #define vld1q_z_s32(__base, __p) __arm_vld1q_z_s32(__base, __p)
2948 #define vld2q_s32(__addr) __arm_vld2q_s32(__addr)
2949 #define vld2q_u32(__addr) __arm_vld2q_u32(__addr)
2950 #define vld4q_s32(__addr) __arm_vld4q_s32(__addr)
2951 #define vld4q_u32(__addr) __arm_vld4q_u32(__addr)
2952 #define vld4q_f16(__addr) __arm_vld4q_f16(__addr)
2953 #define vld2q_f16(__addr) __arm_vld2q_f16(__addr)
2954 #define vld1q_z_f16(__base, __p) __arm_vld1q_z_f16(__base, __p)
2955 #define vst2q_f16(__addr, __value) __arm_vst2q_f16(__addr, __value)
2956 #define vst1q_p_f16(__addr, __value, __p) __arm_vst1q_p_f16(__addr, __value, __p)
2957 #define vld4q_f32(__addr) __arm_vld4q_f32(__addr)
2958 #define vld2q_f32(__addr) __arm_vld2q_f32(__addr)
2959 #define vld1q_z_f32(__base, __p) __arm_vld1q_z_f32(__base, __p)
2960 #define vst2q_f32(__addr, __value) __arm_vst2q_f32(__addr, __value)
2961 #define vst1q_p_f32(__addr, __value, __p) __arm_vst1q_p_f32(__addr, __value, __p)
2962 #define vsetq_lane_f16(__a, __b,  __idx) __arm_vsetq_lane_f16(__a, __b,  __idx)
2963 #define vsetq_lane_f32(__a, __b,  __idx) __arm_vsetq_lane_f32(__a, __b,  __idx)
2964 #define vsetq_lane_s16(__a, __b,  __idx) __arm_vsetq_lane_s16(__a, __b,  __idx)
2965 #define vsetq_lane_s32(__a, __b,  __idx) __arm_vsetq_lane_s32(__a, __b,  __idx)
2966 #define vsetq_lane_s8(__a, __b,  __idx) __arm_vsetq_lane_s8(__a, __b,  __idx)
2967 #define vsetq_lane_s64(__a, __b,  __idx) __arm_vsetq_lane_s64(__a, __b,  __idx)
2968 #define vsetq_lane_u8(__a, __b,  __idx) __arm_vsetq_lane_u8(__a, __b,  __idx)
2969 #define vsetq_lane_u16(__a, __b,  __idx) __arm_vsetq_lane_u16(__a, __b,  __idx)
2970 #define vsetq_lane_u32(__a, __b,  __idx) __arm_vsetq_lane_u32(__a, __b,  __idx)
2971 #define vsetq_lane_u64(__a, __b,  __idx) __arm_vsetq_lane_u64(__a, __b,  __idx)
2972 #define vgetq_lane_f16(__a,  __idx) __arm_vgetq_lane_f16(__a,  __idx)
2973 #define vgetq_lane_f32(__a,  __idx) __arm_vgetq_lane_f32(__a,  __idx)
2974 #define vgetq_lane_s16(__a,  __idx) __arm_vgetq_lane_s16(__a,  __idx)
2975 #define vgetq_lane_s32(__a,  __idx) __arm_vgetq_lane_s32(__a,  __idx)
2976 #define vgetq_lane_s8(__a,  __idx) __arm_vgetq_lane_s8(__a,  __idx)
2977 #define vgetq_lane_s64(__a,  __idx) __arm_vgetq_lane_s64(__a,  __idx)
2978 #define vgetq_lane_u8(__a,  __idx) __arm_vgetq_lane_u8(__a,  __idx)
2979 #define vgetq_lane_u16(__a,  __idx) __arm_vgetq_lane_u16(__a,  __idx)
2980 #define vgetq_lane_u32(__a,  __idx) __arm_vgetq_lane_u32(__a,  __idx)
2981 #define vgetq_lane_u64(__a,  __idx) __arm_vgetq_lane_u64(__a,  __idx)
2982 #define sqrshr(__p0, __p1) __arm_sqrshr(__p0, __p1)
2983 #define sqrshrl(__p0, __p1) __arm_sqrshrl(__p0, __p1)
2984 #define sqrshrl_sat48(__p0, __p1) __arm_sqrshrl_sat48(__p0, __p1)
2985 #define sqshl(__p0, __p1) __arm_sqshl(__p0, __p1)
2986 #define sqshll(__p0, __p1) __arm_sqshll(__p0, __p1)
2987 #define srshr(__p0, __p1) __arm_srshr(__p0, __p1)
2988 #define srshrl(__p0, __p1) __arm_srshrl(__p0, __p1)
2989 #define uqrshl(__p0, __p1) __arm_uqrshl(__p0, __p1)
2990 #define uqrshll(__p0, __p1) __arm_uqrshll(__p0, __p1)
2991 #define uqrshll_sat48(__p0, __p1) __arm_uqrshll_sat48(__p0, __p1)
2992 #define uqshl(__p0, __p1) __arm_uqshl(__p0, __p1)
2993 #define uqshll(__p0, __p1) __arm_uqshll(__p0, __p1)
2994 #define urshr(__p0, __p1) __arm_urshr(__p0, __p1)
2995 #define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
2996 #define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
2997 #define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
2998 #define vshlcq_m_s8(__a,  __b,  __imm, __p) __arm_vshlcq_m_s8(__a,  __b,  __imm, __p)
2999 #define vshlcq_m_u8(__a,  __b,  __imm, __p) __arm_vshlcq_m_u8(__a,  __b,  __imm, __p)
3000 #define vshlcq_m_s16(__a,  __b,  __imm, __p) __arm_vshlcq_m_s16(__a,  __b,  __imm, __p)
3001 #define vshlcq_m_u16(__a,  __b,  __imm, __p) __arm_vshlcq_m_u16(__a,  __b,  __imm, __p)
3002 #define vshlcq_m_s32(__a,  __b,  __imm, __p) __arm_vshlcq_m_s32(__a,  __b,  __imm, __p)
3003 #define vshlcq_m_u32(__a,  __b,  __imm, __p) __arm_vshlcq_m_u32(__a,  __b,  __imm, __p)
3004 #endif
3005 
3006 /* For big-endian, GCC's vector indices are reversed within each 64 bits
3007    compared to the architectural lane indices used by MVE intrinsics.  */
3008 #define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
3009 #ifdef __ARM_BIG_ENDIAN
3010 #define __ARM_LANEQ(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
3011 #else
3012 #define __ARM_LANEQ(__vec, __idx) __idx
3013 #endif
3014 #define __ARM_CHECK_LANEQ(__vec, __idx)		 \
3015   __builtin_arm_lane_check (__ARM_NUM_LANES(__vec),     \
3016 			    __ARM_LANEQ(__vec, __idx))
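/* As an illustration of the mapping above (a sketch, not used by the
   intrinsics below): for a 16-lane vector such as uint8x16_t,
   __ARM_NUM_LANES is 16, so on big-endian __ARM_LANEQ (__vec, __idx)
   evaluates to (__idx ^ 7), e.g.

     __ARM_LANEQ (v, 0)  ->  7
     __ARM_LANEQ (v, 7)  ->  0
     __ARM_LANEQ (v, 8)  -> 15

   while on little-endian the index is passed through unchanged.  */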
3017 
3018 __extension__ extern __inline void
3019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3020 __arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value)
3021 {
3022   union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
3023   __rv.__i = __value;
3024   __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
3025 }
3026 
3027 __extension__ extern __inline void
3028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3029 __arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value)
3030 {
3031   union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
3032   __rv.__i = __value;
3033   __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
3034 }
3035 
3036 __extension__ extern __inline void
3037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3038 __arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value)
3039 {
3040   union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
3041   __rv.__i = __value;
3042   __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
3043 }
3044 
3045 __extension__ extern __inline void
3046 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3047 __arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value)
3048 {
3049   union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
3050   __rv.__i = __value;
3051   __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
3052 }
3053 
3054 __extension__ extern __inline void
3055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3056 __arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value)
3057 {
3058   union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
3059   __rv.__i = __value;
3060   __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
3061 }
3062 
3063 __extension__ extern __inline void
3064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3065 __arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value)
3066 {
3067   union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
3068   __rv.__i = __value;
3069   __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
3070 }
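
/* Usage sketch for the vst4q family above (hypothetical caller code;
   __dst and __src are assumed to point at 64 writable and readable
   bytes respectively):

     uint8x16x4_t __v = __arm_vld4q_u8 (__src);
     __arm_vst4q_u8 (__dst, __v);

   vst4q stores the four component vectors as interleaved 4-element
   structures, mirroring the de-interleaving load done by vld4q.  */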
3071 
3072 __extension__ extern __inline int8x16_t
3073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3074 __arm_vdupq_n_s8 (int8_t __a)
3075 {
3076   return __builtin_mve_vdupq_n_sv16qi (__a);
3077 }
3078 
3079 __extension__ extern __inline int16x8_t
3080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3081 __arm_vdupq_n_s16 (int16_t __a)
3082 {
3083   return __builtin_mve_vdupq_n_sv8hi (__a);
3084 }
3085 
3086 __extension__ extern __inline int32x4_t
3087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3088 __arm_vdupq_n_s32 (int32_t __a)
3089 {
3090   return __builtin_mve_vdupq_n_sv4si (__a);
3091 }
3092 
3093 __extension__ extern __inline int8x16_t
3094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3095 __arm_vabsq_s8 (int8x16_t __a)
3096 {
3097   return __builtin_mve_vabsq_sv16qi (__a);
3098 }
3099 
3100 __extension__ extern __inline int16x8_t
3101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3102 __arm_vabsq_s16 (int16x8_t __a)
3103 {
3104   return __builtin_mve_vabsq_sv8hi (__a);
3105 }
3106 
3107 __extension__ extern __inline int32x4_t
3108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3109 __arm_vabsq_s32 (int32x4_t __a)
3110 {
3111   return __builtin_mve_vabsq_sv4si (__a);
3112 }
3113 
3114 __extension__ extern __inline int8x16_t
3115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3116 __arm_vclsq_s8 (int8x16_t __a)
3117 {
3118   return __builtin_mve_vclsq_sv16qi (__a);
3119 }
3120 
3121 __extension__ extern __inline int16x8_t
3122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3123 __arm_vclsq_s16 (int16x8_t __a)
3124 {
3125   return __builtin_mve_vclsq_sv8hi (__a);
3126 }
3127 
3128 __extension__ extern __inline int32x4_t
3129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3130 __arm_vclsq_s32 (int32x4_t __a)
3131 {
3132   return __builtin_mve_vclsq_sv4si (__a);
3133 }
3134 
3135 __extension__ extern __inline int8x16_t
3136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3137 __arm_vclzq_s8 (int8x16_t __a)
3138 {
3139   return __builtin_mve_vclzq_sv16qi (__a);
3140 }
3141 
3142 __extension__ extern __inline int16x8_t
3143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3144 __arm_vclzq_s16 (int16x8_t __a)
3145 {
3146   return __builtin_mve_vclzq_sv8hi (__a);
3147 }
3148 
3149 __extension__ extern __inline int32x4_t
3150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3151 __arm_vclzq_s32 (int32x4_t __a)
3152 {
3153   return __builtin_mve_vclzq_sv4si (__a);
3154 }
3155 
3156 __extension__ extern __inline int8x16_t
3157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3158 __arm_vnegq_s8 (int8x16_t __a)
3159 {
3160   return __builtin_mve_vnegq_sv16qi (__a);
3161 }
3162 
3163 __extension__ extern __inline int16x8_t
3164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3165 __arm_vnegq_s16 (int16x8_t __a)
3166 {
3167   return __builtin_mve_vnegq_sv8hi (__a);
3168 }
3169 
3170 __extension__ extern __inline int32x4_t
3171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3172 __arm_vnegq_s32 (int32x4_t __a)
3173 {
3174   return __builtin_mve_vnegq_sv4si (__a);
3175 }
3176 
3177 __extension__ extern __inline int64_t
3178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3179 __arm_vaddlvq_s32 (int32x4_t __a)
3180 {
3181   return __builtin_mve_vaddlvq_sv4si (__a);
3182 }
3183 
3184 __extension__ extern __inline int32_t
3185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3186 __arm_vaddvq_s8 (int8x16_t __a)
3187 {
3188   return __builtin_mve_vaddvq_sv16qi (__a);
3189 }
3190 
3191 __extension__ extern __inline int32_t
3192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3193 __arm_vaddvq_s16 (int16x8_t __a)
3194 {
3195   return __builtin_mve_vaddvq_sv8hi (__a);
3196 }
3197 
3198 __extension__ extern __inline int32_t
3199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3200 __arm_vaddvq_s32 (int32x4_t __a)
3201 {
3202   return __builtin_mve_vaddvq_sv4si (__a);
3203 }
3204 
3205 __extension__ extern __inline int16x8_t
3206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3207 __arm_vmovlbq_s8 (int8x16_t __a)
3208 {
3209   return __builtin_mve_vmovlbq_sv16qi (__a);
3210 }
3211 
3212 __extension__ extern __inline int32x4_t
3213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3214 __arm_vmovlbq_s16 (int16x8_t __a)
3215 {
3216   return __builtin_mve_vmovlbq_sv8hi (__a);
3217 }
3218 
3219 __extension__ extern __inline int16x8_t
3220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3221 __arm_vmovltq_s8 (int8x16_t __a)
3222 {
3223   return __builtin_mve_vmovltq_sv16qi (__a);
3224 }
3225 
3226 __extension__ extern __inline int32x4_t
3227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3228 __arm_vmovltq_s16 (int16x8_t __a)
3229 {
3230   return __builtin_mve_vmovltq_sv8hi (__a);
3231 }
3232 
3233 __extension__ extern __inline int8x16_t
3234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3235 __arm_vmvnq_s8 (int8x16_t __a)
3236 {
3237   return __builtin_mve_vmvnq_sv16qi (__a);
3238 }
3239 
3240 __extension__ extern __inline int16x8_t
3241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3242 __arm_vmvnq_s16 (int16x8_t __a)
3243 {
3244   return __builtin_mve_vmvnq_sv8hi (__a);
3245 }
3246 
3247 __extension__ extern __inline int32x4_t
3248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3249 __arm_vmvnq_s32 (int32x4_t __a)
3250 {
3251   return __builtin_mve_vmvnq_sv4si (__a);
3252 }
3253 
3254 __extension__ extern __inline int16x8_t
3255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3256 __arm_vmvnq_n_s16 (const int16_t __imm)
3257 {
3258   return __builtin_mve_vmvnq_n_sv8hi (__imm);
3259 }
3260 
3261 __extension__ extern __inline int32x4_t
3262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3263 __arm_vmvnq_n_s32 (const int32_t __imm)
3264 {
3265   return __builtin_mve_vmvnq_n_sv4si (__imm);
3266 }
3267 
3268 __extension__ extern __inline int8x16_t
3269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3270 __arm_vrev16q_s8 (int8x16_t __a)
3271 {
3272   return __builtin_mve_vrev16q_sv16qi (__a);
3273 }
3274 
3275 __extension__ extern __inline int8x16_t
3276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3277 __arm_vrev32q_s8 (int8x16_t __a)
3278 {
3279   return __builtin_mve_vrev32q_sv16qi (__a);
3280 }
3281 
3282 __extension__ extern __inline int16x8_t
3283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3284 __arm_vrev32q_s16 (int16x8_t __a)
3285 {
3286   return __builtin_mve_vrev32q_sv8hi (__a);
3287 }
3288 
3289 __extension__ extern __inline int8x16_t
3290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3291 __arm_vrev64q_s8 (int8x16_t __a)
3292 {
3293   return __builtin_mve_vrev64q_sv16qi (__a);
3294 }
3295 
3296 __extension__ extern __inline int16x8_t
3297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3298 __arm_vrev64q_s16 (int16x8_t __a)
3299 {
3300   return __builtin_mve_vrev64q_sv8hi (__a);
3301 }
3302 
3303 __extension__ extern __inline int32x4_t
3304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3305 __arm_vrev64q_s32 (int32x4_t __a)
3306 {
3307   return __builtin_mve_vrev64q_sv4si (__a);
3308 }
3309 
3310 __extension__ extern __inline int8x16_t
3311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3312 __arm_vqabsq_s8 (int8x16_t __a)
3313 {
3314   return __builtin_mve_vqabsq_sv16qi (__a);
3315 }
3316 
3317 __extension__ extern __inline int16x8_t
3318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3319 __arm_vqabsq_s16 (int16x8_t __a)
3320 {
3321   return __builtin_mve_vqabsq_sv8hi (__a);
3322 }
3323 
3324 __extension__ extern __inline int32x4_t
3325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3326 __arm_vqabsq_s32 (int32x4_t __a)
3327 {
3328   return __builtin_mve_vqabsq_sv4si (__a);
3329 }
3330 
3331 __extension__ extern __inline int8x16_t
3332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3333 __arm_vqnegq_s8 (int8x16_t __a)
3334 {
3335   return __builtin_mve_vqnegq_sv16qi (__a);
3336 }
3337 
3338 __extension__ extern __inline int16x8_t
3339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3340 __arm_vqnegq_s16 (int16x8_t __a)
3341 {
3342   return __builtin_mve_vqnegq_sv8hi (__a);
3343 }
3344 
3345 __extension__ extern __inline int32x4_t
3346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3347 __arm_vqnegq_s32 (int32x4_t __a)
3348 {
3349   return __builtin_mve_vqnegq_sv4si (__a);
3350 }
3351 
3352 __extension__ extern __inline uint8x16_t
3353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3354 __arm_vrev64q_u8 (uint8x16_t __a)
3355 {
3356   return __builtin_mve_vrev64q_uv16qi (__a);
3357 }
3358 
3359 __extension__ extern __inline uint16x8_t
3360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3361 __arm_vrev64q_u16 (uint16x8_t __a)
3362 {
3363   return __builtin_mve_vrev64q_uv8hi (__a);
3364 }
3365 
3366 __extension__ extern __inline uint32x4_t
3367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3368 __arm_vrev64q_u32 (uint32x4_t __a)
3369 {
3370   return __builtin_mve_vrev64q_uv4si (__a);
3371 }
3372 
3373 __extension__ extern __inline uint8x16_t
3374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3375 __arm_vmvnq_u8 (uint8x16_t __a)
3376 {
3377   return __builtin_mve_vmvnq_uv16qi (__a);
3378 }
3379 
3380 __extension__ extern __inline uint16x8_t
3381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3382 __arm_vmvnq_u16 (uint16x8_t __a)
3383 {
3384   return __builtin_mve_vmvnq_uv8hi (__a);
3385 }
3386 
3387 __extension__ extern __inline uint32x4_t
3388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3389 __arm_vmvnq_u32 (uint32x4_t __a)
3390 {
3391   return __builtin_mve_vmvnq_uv4si (__a);
3392 }
3393 
3394 __extension__ extern __inline uint8x16_t
3395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3396 __arm_vdupq_n_u8 (uint8_t __a)
3397 {
3398   return __builtin_mve_vdupq_n_uv16qi (__a);
3399 }
3400 
3401 __extension__ extern __inline uint16x8_t
3402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3403 __arm_vdupq_n_u16 (uint16_t __a)
3404 {
3405   return __builtin_mve_vdupq_n_uv8hi (__a);
3406 }
3407 
3408 __extension__ extern __inline uint32x4_t
3409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3410 __arm_vdupq_n_u32 (uint32_t __a)
3411 {
3412   return __builtin_mve_vdupq_n_uv4si (__a);
3413 }
3414 
3415 __extension__ extern __inline uint8x16_t
3416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3417 __arm_vclzq_u8 (uint8x16_t __a)
3418 {
3419   return __builtin_mve_vclzq_uv16qi (__a);
3420 }
3421 
3422 __extension__ extern __inline uint16x8_t
3423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3424 __arm_vclzq_u16 (uint16x8_t __a)
3425 {
3426   return __builtin_mve_vclzq_uv8hi (__a);
3427 }
3428 
3429 __extension__ extern __inline uint32x4_t
3430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3431 __arm_vclzq_u32 (uint32x4_t __a)
3432 {
3433   return __builtin_mve_vclzq_uv4si (__a);
3434 }
3435 
3436 __extension__ extern __inline uint32_t
3437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3438 __arm_vaddvq_u8 (uint8x16_t __a)
3439 {
3440   return __builtin_mve_vaddvq_uv16qi (__a);
3441 }
3442 
3443 __extension__ extern __inline uint32_t
3444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3445 __arm_vaddvq_u16 (uint16x8_t __a)
3446 {
3447   return __builtin_mve_vaddvq_uv8hi (__a);
3448 }
3449 
3450 __extension__ extern __inline uint32_t
3451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3452 __arm_vaddvq_u32 (uint32x4_t __a)
3453 {
3454   return __builtin_mve_vaddvq_uv4si (__a);
3455 }
3456 
3457 __extension__ extern __inline uint8x16_t
3458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3459 __arm_vrev32q_u8 (uint8x16_t __a)
3460 {
3461   return __builtin_mve_vrev32q_uv16qi (__a);
3462 }
3463 
3464 __extension__ extern __inline uint16x8_t
3465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3466 __arm_vrev32q_u16 (uint16x8_t __a)
3467 {
3468   return __builtin_mve_vrev32q_uv8hi (__a);
3469 }
3470 
3471 __extension__ extern __inline uint16x8_t
3472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3473 __arm_vmovltq_u8 (uint8x16_t __a)
3474 {
3475   return __builtin_mve_vmovltq_uv16qi (__a);
3476 }
3477 
3478 __extension__ extern __inline uint32x4_t
3479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3480 __arm_vmovltq_u16 (uint16x8_t __a)
3481 {
3482   return __builtin_mve_vmovltq_uv8hi (__a);
3483 }
3484 
3485 __extension__ extern __inline uint16x8_t
3486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3487 __arm_vmovlbq_u8 (uint8x16_t __a)
3488 {
3489   return __builtin_mve_vmovlbq_uv16qi (__a);
3490 }
3491 
3492 __extension__ extern __inline uint32x4_t
3493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3494 __arm_vmovlbq_u16 (uint16x8_t __a)
3495 {
3496   return __builtin_mve_vmovlbq_uv8hi (__a);
3497 }
3498 
3499 __extension__ extern __inline uint16x8_t
3500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3501 __arm_vmvnq_n_u16 (const int __imm)
3502 {
3503   return __builtin_mve_vmvnq_n_uv8hi (__imm);
3504 }
3505 
3506 __extension__ extern __inline uint32x4_t
3507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3508 __arm_vmvnq_n_u32 (const int __imm)
3509 {
3510   return __builtin_mve_vmvnq_n_uv4si (__imm);
3511 }
3512 
3513 __extension__ extern __inline uint8x16_t
3514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3515 __arm_vrev16q_u8 (uint8x16_t __a)
3516 {
3517   return __builtin_mve_vrev16q_uv16qi (__a);
3518 }
3519 
3520 __extension__ extern __inline uint64_t
3521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3522 __arm_vaddlvq_u32 (uint32x4_t __a)
3523 {
3524   return __builtin_mve_vaddlvq_uv4si (__a);
3525 }
3526 
3527 __extension__ extern __inline mve_pred16_t
3528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3529 __arm_vctp16q (uint32_t __a)
3530 {
3531   return __builtin_mve_vctp16qhi (__a);
3532 }
3533 
3534 __extension__ extern __inline mve_pred16_t
3535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3536 __arm_vctp32q (uint32_t __a)
3537 {
3538   return __builtin_mve_vctp32qhi (__a);
3539 }
3540 
3541 __extension__ extern __inline mve_pred16_t
3542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3543 __arm_vctp64q (uint32_t __a)
3544 {
3545   return __builtin_mve_vctp64qhi (__a);
3546 }
3547 
3548 __extension__ extern __inline mve_pred16_t
3549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3550 __arm_vctp8q (uint32_t __a)
3551 {
3552   return __builtin_mve_vctp8qhi (__a);
3553 }
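
/* Sketch of a typical use of the vctp intrinsics above (hypothetical
   caller code): building a tail predicate for the final, partial
   iteration of a vectorised loop with __n elements remaining,

     mve_pred16_t __p = __arm_vctp32q (__n);

   which leaves only the first __n of the four 32-bit lanes active in
   predicated operations that take __p.  */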
3554 
3555 __extension__ extern __inline mve_pred16_t
3556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3557 __arm_vpnot (mve_pred16_t __a)
3558 {
3559   return __builtin_mve_vpnothi (__a);
3560 }
3561 
3562 __extension__ extern __inline uint8x16_t
3563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3564 __arm_vcreateq_u8 (uint64_t __a, uint64_t __b)
3565 {
3566   return __builtin_mve_vcreateq_uv16qi (__a, __b);
3567 }
3568 
3569 __extension__ extern __inline uint16x8_t
3570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3571 __arm_vcreateq_u16 (uint64_t __a, uint64_t __b)
3572 {
3573   return __builtin_mve_vcreateq_uv8hi (__a, __b);
3574 }
3575 
3576 __extension__ extern __inline uint32x4_t
3577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3578 __arm_vcreateq_u32 (uint64_t __a, uint64_t __b)
3579 {
3580   return __builtin_mve_vcreateq_uv4si (__a, __b);
3581 }
3582 
3583 __extension__ extern __inline uint64x2_t
3584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3585 __arm_vcreateq_u64 (uint64_t __a, uint64_t __b)
3586 {
3587   return __builtin_mve_vcreateq_uv2di (__a, __b);
3588 }
3589 
3590 __extension__ extern __inline int8x16_t
3591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3592 __arm_vcreateq_s8 (uint64_t __a, uint64_t __b)
3593 {
3594   return __builtin_mve_vcreateq_sv16qi (__a, __b);
3595 }
3596 
3597 __extension__ extern __inline int16x8_t
3598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3599 __arm_vcreateq_s16 (uint64_t __a, uint64_t __b)
3600 {
3601   return __builtin_mve_vcreateq_sv8hi (__a, __b);
3602 }
3603 
3604 __extension__ extern __inline int32x4_t
3605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3606 __arm_vcreateq_s32 (uint64_t __a, uint64_t __b)
3607 {
3608   return __builtin_mve_vcreateq_sv4si (__a, __b);
3609 }
3610 
3611 __extension__ extern __inline int64x2_t
3612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3613 __arm_vcreateq_s64 (uint64_t __a, uint64_t __b)
3614 {
3615   return __builtin_mve_vcreateq_sv2di (__a, __b);
3616 }
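
/* Usage sketch for the vcreateq family above (hypothetical caller
   code; the constants are purely illustrative): the first argument
   supplies the low 64 bits of the result and the second the high 64
   bits, so on this little-endian-only target

     uint8x16_t __v = __arm_vcreateq_u8 (0x0706050403020100ULL,
					 0x0f0e0d0c0b0a0908ULL);

   yields the byte values 0, 1, ... 15 in lanes 0..15.  */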
3617 
3618 __extension__ extern __inline int8x16_t
3619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3620 __arm_vshrq_n_s8 (int8x16_t __a, const int __imm)
3621 {
3622   return __builtin_mve_vshrq_n_sv16qi (__a, __imm);
3623 }
3624 
3625 __extension__ extern __inline int16x8_t
3626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3627 __arm_vshrq_n_s16 (int16x8_t __a, const int __imm)
3628 {
3629   return __builtin_mve_vshrq_n_sv8hi (__a, __imm);
3630 }
3631 
3632 __extension__ extern __inline int32x4_t
3633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3634 __arm_vshrq_n_s32 (int32x4_t __a, const int __imm)
3635 {
3636   return __builtin_mve_vshrq_n_sv4si (__a, __imm);
3637 }
3638 
3639 __extension__ extern __inline uint8x16_t
3640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3641 __arm_vshrq_n_u8 (uint8x16_t __a, const int __imm)
3642 {
3643   return __builtin_mve_vshrq_n_uv16qi (__a, __imm);
3644 }
3645 
3646 __extension__ extern __inline uint16x8_t
3647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3648 __arm_vshrq_n_u16 (uint16x8_t __a, const int __imm)
3649 {
3650   return __builtin_mve_vshrq_n_uv8hi (__a, __imm);
3651 }
3652 
3653 __extension__ extern __inline uint32x4_t
3654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3655 __arm_vshrq_n_u32 (uint32x4_t __a, const int __imm)
3656 {
3657   return __builtin_mve_vshrq_n_uv4si (__a, __imm);
3658 }
3659 __extension__ extern __inline int64_t
3660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3661 __arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
3662 {
3663   return __builtin_mve_vaddlvq_p_sv4si (__a, __p);
3664 }
3665 
3666 __extension__ extern __inline uint64_t
3667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3668 __arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
3669 {
3670   return __builtin_mve_vaddlvq_p_uv4si (__a, __p);
3671 }
3672 
3673 __extension__ extern __inline mve_pred16_t
3674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3675 __arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b)
3676 {
3677   return __builtin_mve_vcmpneq_sv16qi (__a, __b);
3678 }
3679 
3680 __extension__ extern __inline mve_pred16_t
3681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3682 __arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b)
3683 {
3684   return __builtin_mve_vcmpneq_sv8hi (__a, __b);
3685 }
3686 
3687 __extension__ extern __inline mve_pred16_t
3688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3689 __arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b)
3690 {
3691   return __builtin_mve_vcmpneq_sv4si (__a, __b);
3692 }
3693 
3694 __extension__ extern __inline mve_pred16_t
3695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3696 __arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b)
3697 {
3698   return __builtin_mve_vcmpneq_uv16qi (__a, __b);
3699 }
3700 
3701 __extension__ extern __inline mve_pred16_t
3702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3703 __arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b)
3704 {
3705   return __builtin_mve_vcmpneq_uv8hi (__a, __b);
3706 }
3707 
3708 __extension__ extern __inline mve_pred16_t
3709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3710 __arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b)
3711 {
3712   return __builtin_mve_vcmpneq_uv4si (__a, __b);
3713 }
3714 
3715 __extension__ extern __inline int8x16_t
3716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3717 __arm_vshlq_s8 (int8x16_t __a, int8x16_t __b)
3718 {
3719   return __builtin_mve_vshlq_sv16qi (__a, __b);
3720 }
3721 
3722 __extension__ extern __inline int16x8_t
3723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3724 __arm_vshlq_s16 (int16x8_t __a, int16x8_t __b)
3725 {
3726   return __builtin_mve_vshlq_sv8hi (__a, __b);
3727 }
3728 
3729 __extension__ extern __inline int32x4_t
3730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3731 __arm_vshlq_s32 (int32x4_t __a, int32x4_t __b)
3732 {
3733   return __builtin_mve_vshlq_sv4si (__a, __b);
3734 }
3735 
3736 __extension__ extern __inline uint8x16_t
3737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3738 __arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b)
3739 {
3740   return __builtin_mve_vshlq_uv16qi (__a, __b);
3741 }
3742 
3743 __extension__ extern __inline uint16x8_t
3744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3745 __arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b)
3746 {
3747   return __builtin_mve_vshlq_uv8hi (__a, __b);
3748 }
3749 
3750 __extension__ extern __inline uint32x4_t
3751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3752 __arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
3753 {
3754   return __builtin_mve_vshlq_uv4si (__a, __b);
3755 }
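/* Note on the vshlq (vector shift) intrinsics above: the per-element
   shift counts in __b are signed, so a sketch such as

     int32x4_t __r = __arm_vshlq_s32 (__x, __s);

   shifts each lane of __x left by the corresponding lane of __s, with
   negative counts producing right shifts (hypothetical caller code;
   __x and __s are assumed to be caller-supplied vectors).  */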
3756 __extension__ extern __inline uint8x16_t
3757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3758 __arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
3759 {
3760   return __builtin_mve_vsubq_uv16qi (__a, __b);
3761 }
3762 
3763 __extension__ extern __inline uint8x16_t
3764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3765 __arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
3766 {
3767   return __builtin_mve_vsubq_n_uv16qi (__a, __b);
3768 }
3769 
3770 __extension__ extern __inline uint8x16_t
3771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3772 __arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
3773 {
3774   return __builtin_mve_vrmulhq_uv16qi (__a, __b);
3775 }
3776 
3777 __extension__ extern __inline uint8x16_t
3778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3779 __arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
3780 {
3781   return __builtin_mve_vrhaddq_uv16qi (__a, __b);
3782 }
3783 
3784 __extension__ extern __inline uint8x16_t
3785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3786 __arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
3787 {
3788   return __builtin_mve_vqsubq_uv16qi (__a, __b);
3789 }
3790 
3791 __extension__ extern __inline uint8x16_t
3792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3793 __arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b)
3794 {
3795   return __builtin_mve_vqsubq_n_uv16qi (__a, __b);
3796 }
3797 
3798 __extension__ extern __inline uint8x16_t
3799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3800 __arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
3801 {
3802   return __builtin_mve_vqaddq_uv16qi (__a, __b);
3803 }
3804 
3805 __extension__ extern __inline uint8x16_t
3806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3807 __arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b)
3808 {
3809   return __builtin_mve_vqaddq_n_uv16qi (__a, __b);
3810 }
3811 
3812 __extension__ extern __inline uint8x16_t
3813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3814 __arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
3815 {
3816   return __builtin_mve_vorrq_uv16qi (__a, __b);
3817 }
3818 
3819 __extension__ extern __inline uint8x16_t
3820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3821 __arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
3822 {
3823   return __builtin_mve_vornq_uv16qi (__a, __b);
3824 }
3825 
3826 __extension__ extern __inline uint8x16_t
3827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3828 __arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
3829 {
3830   return __builtin_mve_vmulq_uv16qi (__a, __b);
3831 }
3832 
3833 __extension__ extern __inline uint8x16_t
3834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3835 __arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
3836 {
3837   return __builtin_mve_vmulq_n_uv16qi (__a, __b);
3838 }
3839 
3840 __extension__ extern __inline uint16x8_t
3841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3842 __arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
3843 {
3844   return __builtin_mve_vmulltq_int_uv16qi (__a, __b);
3845 }
3846 
3847 __extension__ extern __inline uint16x8_t
3848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3849 __arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b)
3850 {
3851   return __builtin_mve_vmullbq_int_uv16qi (__a, __b);
3852 }
3853 
3854 __extension__ extern __inline uint8x16_t
3855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3856 __arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
3857 {
3858   return __builtin_mve_vmulhq_uv16qi (__a, __b);
3859 }
3860 
3861 __extension__ extern __inline uint32_t
3862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3863 __arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b)
3864 {
3865   return __builtin_mve_vmladavq_uv16qi (__a, __b);
3866 }
3867 
3868 __extension__ extern __inline uint8_t
3869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3870 __arm_vminvq_u8 (uint8_t __a, uint8x16_t __b)
3871 {
3872   return __builtin_mve_vminvq_uv16qi (__a, __b);
3873 }
3874 
3875 __extension__ extern __inline uint8x16_t
3876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3877 __arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b)
3878 {
3879   return __builtin_mve_vminq_uv16qi (__a, __b);
3880 }
3881 
3882 __extension__ extern __inline uint8_t
3883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3884 __arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b)
3885 {
3886   return __builtin_mve_vmaxvq_uv16qi (__a, __b);
3887 }
3888 
3889 __extension__ extern __inline uint8x16_t
3890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3891 __arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
3892 {
3893   return __builtin_mve_vmaxq_uv16qi (__a, __b);
3894 }
3895 
3896 __extension__ extern __inline uint8x16_t
3897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3898 __arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
3899 {
3900   return __builtin_mve_vhsubq_uv16qi (__a, __b);
3901 }
3902 
3903 __extension__ extern __inline uint8x16_t
3904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3905 __arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b)
3906 {
3907   return __builtin_mve_vhsubq_n_uv16qi (__a, __b);
3908 }
3909 
3910 __extension__ extern __inline uint8x16_t
3911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3912 __arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
3913 {
3914   return __builtin_mve_vhaddq_uv16qi (__a, __b);
3915 }
3916 
3917 __extension__ extern __inline uint8x16_t
3918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3919 __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
3920 {
3921   return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
3922 }
3923 
3924 __extension__ extern __inline uint8x16_t
3925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3926 __arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
3927 {
3928   return __builtin_mve_veorq_uv16qi (__a, __b);
3929 }
3930 
3931 __extension__ extern __inline mve_pred16_t
3932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3933 __arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
3934 {
3935   return __builtin_mve_vcmpneq_n_uv16qi (__a, __b);
3936 }
3937 
3938 __extension__ extern __inline mve_pred16_t
3939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3940 __arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b)
3941 {
3942   return __builtin_mve_vcmphiq_uv16qi (__a, __b);
3943 }
3944 
3945 __extension__ extern __inline mve_pred16_t
3946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3947 __arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b)
3948 {
3949   return __builtin_mve_vcmphiq_n_uv16qi (__a, __b);
3950 }
3951 
3952 __extension__ extern __inline mve_pred16_t
3953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3954 __arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b)
3955 {
3956   return __builtin_mve_vcmpeqq_uv16qi (__a, __b);
3957 }
3958 
3959 __extension__ extern __inline mve_pred16_t
3960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3961 __arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b)
3962 {
3963   return __builtin_mve_vcmpeqq_n_uv16qi (__a, __b);
3964 }
3965 
3966 __extension__ extern __inline mve_pred16_t
3967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3968 __arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b)
3969 {
3970   return __builtin_mve_vcmpcsq_uv16qi (__a, __b);
3971 }
3972 
3973 __extension__ extern __inline mve_pred16_t
3974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3975 __arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b)
3976 {
3977   return __builtin_mve_vcmpcsq_n_uv16qi (__a, __b);
3978 }
3979 
3980 __extension__ extern __inline uint8x16_t
3981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3982 __arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
3983 {
3984   return __builtin_mve_vcaddq_rot90_uv16qi (__a, __b);
3985 }
3986 
3987 __extension__ extern __inline uint8x16_t
3988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3989 __arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
3990 {
3991   return __builtin_mve_vcaddq_rot270_uv16qi (__a, __b);
3992 }
3993 
3994 __extension__ extern __inline uint8x16_t
3995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3996 __arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
3997 {
3998   return __builtin_mve_vbicq_uv16qi (__a, __b);
3999 }
4000 
4001 __extension__ extern __inline uint8x16_t
4002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4003 __arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
4004 {
4005   return __builtin_mve_vandq_uv16qi (__a, __b);
4006 }
4007 
4008 __extension__ extern __inline uint32_t
4009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4010 __arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
4011 {
4012   return __builtin_mve_vaddvq_p_uv16qi (__a, __p);
4013 }
4014 
4015 __extension__ extern __inline uint32_t
4016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4017 __arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
4018 {
4019   return __builtin_mve_vaddvaq_uv16qi (__a, __b);
4020 }
4021 
4022 __extension__ extern __inline uint8x16_t
4023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4024 __arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
4025 {
4026   return __builtin_mve_vaddq_n_uv16qi (__a, __b);
4027 }
4028 
4029 __extension__ extern __inline uint8x16_t
4030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4031 __arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
4032 {
4033   return __builtin_mve_vabdq_uv16qi (__a, __b);
4034 }
4035 
4036 __extension__ extern __inline uint8x16_t
4037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4038 __arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
4039 {
4040   return __builtin_mve_vshlq_r_uv16qi (__a, __b);
4041 }
4042 
4043 __extension__ extern __inline uint8x16_t
4044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4045 __arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
4046 {
4047   return __builtin_mve_vrshlq_uv16qi (__a, __b);
4048 }
4049 
4050 __extension__ extern __inline uint8x16_t
4051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4052 __arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
4053 {
4054   return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
4055 }
4056 
4057 __extension__ extern __inline uint8x16_t
4058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4059 __arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
4060 {
4061   return __builtin_mve_vqshlq_uv16qi (__a, __b);
4062 }
4063 
4064 __extension__ extern __inline uint8x16_t
4065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4066 __arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
4067 {
4068   return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
4069 }
4070 
4071 __extension__ extern __inline uint8x16_t
4072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4073 __arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
4074 {
4075   return __builtin_mve_vqrshlq_uv16qi (__a, __b);
4076 }
4077 
4078 __extension__ extern __inline uint8x16_t
4079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4080 __arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
4081 {
4082   return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
4083 }
4084 
4085 __extension__ extern __inline uint8_t
4086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4087 __arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
4088 {
4089   return __builtin_mve_vminavq_sv16qi (__a, __b);
4090 }
4091 
4092 __extension__ extern __inline uint8x16_t
4093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4094 __arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b)
4095 {
4096   return __builtin_mve_vminaq_sv16qi (__a, __b);
4097 }
4098 
4099 __extension__ extern __inline uint8_t
4100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4101 __arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b)
4102 {
4103   return __builtin_mve_vmaxavq_sv16qi (__a, __b);
4104 }
4105 
4106 __extension__ extern __inline uint8x16_t
4107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4108 __arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b)
4109 {
4110   return __builtin_mve_vmaxaq_sv16qi (__a, __b);
4111 }
4112 
4113 __extension__ extern __inline uint8x16_t
4114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4115 __arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b)
4116 {
4117   return __builtin_mve_vbrsrq_n_uv16qi (__a, __b);
4118 }
4119 
4120 __extension__ extern __inline uint8x16_t
4121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4122 __arm_vshlq_n_u8 (uint8x16_t __a, const int __imm)
4123 {
4124   return __builtin_mve_vshlq_n_uv16qi (__a, __imm);
4125 }
4126 
4127 __extension__ extern __inline uint8x16_t
4128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4129 __arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm)
4130 {
4131   return __builtin_mve_vrshrq_n_uv16qi (__a, __imm);
4132 }
4133 
4134 __extension__ extern __inline uint8x16_t
4135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4136 __arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm)
4137 {
4138   return __builtin_mve_vqshlq_n_uv16qi (__a, __imm);
4139 }
4140 
4141 __extension__ extern __inline mve_pred16_t
4142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4143 __arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b)
4144 {
4145   return __builtin_mve_vcmpneq_n_sv16qi (__a, __b);
4146 }
4147 
4148 __extension__ extern __inline mve_pred16_t
4149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4150 __arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b)
4151 {
4152   return __builtin_mve_vcmpltq_sv16qi (__a, __b);
4153 }
4154 
4155 __extension__ extern __inline mve_pred16_t
4156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4157 __arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b)
4158 {
4159   return __builtin_mve_vcmpltq_n_sv16qi (__a, __b);
4160 }
4161 
4162 __extension__ extern __inline mve_pred16_t
4163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4164 __arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b)
4165 {
4166   return __builtin_mve_vcmpleq_sv16qi (__a, __b);
4167 }
4168 
4169 __extension__ extern __inline mve_pred16_t
4170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4171 __arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b)
4172 {
4173   return __builtin_mve_vcmpleq_n_sv16qi (__a, __b);
4174 }
4175 
4176 __extension__ extern __inline mve_pred16_t
4177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4178 __arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b)
4179 {
4180   return __builtin_mve_vcmpgtq_sv16qi (__a, __b);
4181 }
4182 
4183 __extension__ extern __inline mve_pred16_t
4184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4185 __arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b)
4186 {
4187   return __builtin_mve_vcmpgtq_n_sv16qi (__a, __b);
4188 }
4189 
4190 __extension__ extern __inline mve_pred16_t
4191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4192 __arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b)
4193 {
4194   return __builtin_mve_vcmpgeq_sv16qi (__a, __b);
4195 }
4196 
4197 __extension__ extern __inline mve_pred16_t
4198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4199 __arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b)
4200 {
4201   return __builtin_mve_vcmpgeq_n_sv16qi (__a, __b);
4202 }
4203 
4204 __extension__ extern __inline mve_pred16_t
4205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4206 __arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b)
4207 {
4208   return __builtin_mve_vcmpeqq_sv16qi (__a, __b);
4209 }
4210 
4211 __extension__ extern __inline mve_pred16_t
4212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4213 __arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b)
4214 {
4215   return __builtin_mve_vcmpeqq_n_sv16qi (__a, __b);
4216 }
4217 
4218 __extension__ extern __inline uint8x16_t
4219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4220 __arm_vqshluq_n_s8 (int8x16_t __a, const int __imm)
4221 {
4222   return __builtin_mve_vqshluq_n_sv16qi (__a, __imm);
4223 }
4224 
4225 __extension__ extern __inline int32_t
4226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4227 __arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
4228 {
4229   return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
4230 }
4231 
4232 __extension__ extern __inline int8x16_t
4233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4234 __arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
4235 {
4236   return __builtin_mve_vsubq_sv16qi (__a, __b);
4237 }
4238 
4239 __extension__ extern __inline int8x16_t
4240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4241 __arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
4242 {
4243   return __builtin_mve_vsubq_n_sv16qi (__a, __b);
4244 }
4245 
4246 __extension__ extern __inline int8x16_t
4247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4248 __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
4249 {
4250   return __builtin_mve_vshlq_r_sv16qi (__a, __b);
4251 }
4252 
4253 __extension__ extern __inline int8x16_t
4254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4255 __arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
4256 {
4257   return __builtin_mve_vrshlq_sv16qi (__a, __b);
4258 }
4259 
4260 __extension__ extern __inline int8x16_t
4261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4262 __arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
4263 {
4264   return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
4265 }
4266 
4267 __extension__ extern __inline int8x16_t
4268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4269 __arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b)
4270 {
4271   return __builtin_mve_vrmulhq_sv16qi (__a, __b);
4272 }
4273 
4274 __extension__ extern __inline int8x16_t
4275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4276 __arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
4277 {
4278   return __builtin_mve_vrhaddq_sv16qi (__a, __b);
4279 }
4280 
4281 __extension__ extern __inline int8x16_t
4282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4283 __arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b)
4284 {
4285   return __builtin_mve_vqsubq_sv16qi (__a, __b);
4286 }
4287 
4288 __extension__ extern __inline int8x16_t
4289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4290 __arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b)
4291 {
4292   return __builtin_mve_vqsubq_n_sv16qi (__a, __b);
4293 }
4294 
4295 __extension__ extern __inline int8x16_t
4296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4297 __arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
4298 {
4299   return __builtin_mve_vqshlq_sv16qi (__a, __b);
4300 }
4301 
4302 __extension__ extern __inline int8x16_t
4303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4304 __arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
4305 {
4306   return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
4307 }
4308 
4309 __extension__ extern __inline int8x16_t
4310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4311 __arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
4312 {
4313   return __builtin_mve_vqrshlq_sv16qi (__a, __b);
4314 }
4315 
4316 __extension__ extern __inline int8x16_t
4317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4318 __arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
4319 {
4320   return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
4321 }
4322 
4323 __extension__ extern __inline int8x16_t
4324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4325 __arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
4326 {
4327   return __builtin_mve_vqrdmulhq_sv16qi (__a, __b);
4328 }
4329 
4330 __extension__ extern __inline int8x16_t
4331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4332 __arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b)
4333 {
4334   return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b);
4335 }
4336 
4337 __extension__ extern __inline int8x16_t
4338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4339 __arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b)
4340 {
4341   return __builtin_mve_vqdmulhq_sv16qi (__a, __b);
4342 }
4343 
4344 __extension__ extern __inline int8x16_t
4345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4346 __arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b)
4347 {
4348   return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b);
4349 }
4350 
4351 __extension__ extern __inline int8x16_t
4352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4353 __arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b)
4354 {
4355   return __builtin_mve_vqaddq_sv16qi (__a, __b);
4356 }
4357 
4358 __extension__ extern __inline int8x16_t
4359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4360 __arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b)
4361 {
4362   return __builtin_mve_vqaddq_n_sv16qi (__a, __b);
4363 }
4364 
4365 __extension__ extern __inline int8x16_t
4366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4367 __arm_vorrq_s8 (int8x16_t __a, int8x16_t __b)
4368 {
4369   return __builtin_mve_vorrq_sv16qi (__a, __b);
4370 }
4371 
4372 __extension__ extern __inline int8x16_t
4373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4374 __arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
4375 {
4376   return __builtin_mve_vornq_sv16qi (__a, __b);
4377 }
4378 
4379 __extension__ extern __inline int8x16_t
4380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4381 __arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
4382 {
4383   return __builtin_mve_vmulq_sv16qi (__a, __b);
4384 }
4385 
4386 __extension__ extern __inline int8x16_t
4387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4388 __arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
4389 {
4390   return __builtin_mve_vmulq_n_sv16qi (__a, __b);
4391 }
4392 
4393 __extension__ extern __inline int16x8_t
4394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4395 __arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
4396 {
4397   return __builtin_mve_vmulltq_int_sv16qi (__a, __b);
4398 }
4399 
4400 __extension__ extern __inline int16x8_t
4401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4402 __arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b)
4403 {
4404   return __builtin_mve_vmullbq_int_sv16qi (__a, __b);
4405 }
4406 
4407 __extension__ extern __inline int8x16_t
4408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4409 __arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b)
4410 {
4411   return __builtin_mve_vmulhq_sv16qi (__a, __b);
4412 }
4413 
4414 __extension__ extern __inline int32_t
4415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4416 __arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b)
4417 {
4418   return __builtin_mve_vmlsdavxq_sv16qi (__a, __b);
4419 }
4420 
4421 __extension__ extern __inline int32_t
4422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4423 __arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b)
4424 {
4425   return __builtin_mve_vmlsdavq_sv16qi (__a, __b);
4426 }
4427 
4428 __extension__ extern __inline int32_t
4429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4430 __arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b)
4431 {
4432   return __builtin_mve_vmladavxq_sv16qi (__a, __b);
4433 }
4434 
4435 __extension__ extern __inline int32_t
4436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4437 __arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b)
4438 {
4439   return __builtin_mve_vmladavq_sv16qi (__a, __b);
4440 }
4441 
4442 __extension__ extern __inline int8_t
4443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4444 __arm_vminvq_s8 (int8_t __a, int8x16_t __b)
4445 {
4446   return __builtin_mve_vminvq_sv16qi (__a, __b);
4447 }
4448 
4449 __extension__ extern __inline int8x16_t
4450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4451 __arm_vminq_s8 (int8x16_t __a, int8x16_t __b)
4452 {
4453   return __builtin_mve_vminq_sv16qi (__a, __b);
4454 }
4455 
4456 __extension__ extern __inline int8_t
4457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4458 __arm_vmaxvq_s8 (int8_t __a, int8x16_t __b)
4459 {
4460   return __builtin_mve_vmaxvq_sv16qi (__a, __b);
4461 }
4462 
4463 __extension__ extern __inline int8x16_t
4464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4465 __arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
4466 {
4467   return __builtin_mve_vmaxq_sv16qi (__a, __b);
4468 }
4469 
4470 __extension__ extern __inline int8x16_t
4471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4472 __arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b)
4473 {
4474   return __builtin_mve_vhsubq_sv16qi (__a, __b);
4475 }
4476 
4477 __extension__ extern __inline int8x16_t
4478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4479 __arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b)
4480 {
4481   return __builtin_mve_vhsubq_n_sv16qi (__a, __b);
4482 }
4483 
4484 __extension__ extern __inline int8x16_t
4485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4486 __arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
4487 {
4488   return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b);
4489 }
4490 
4491 __extension__ extern __inline int8x16_t
4492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4493 __arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
4494 {
4495   return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b);
4496 }
4497 
4498 __extension__ extern __inline int8x16_t
4499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4500 __arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b)
4501 {
4502   return __builtin_mve_vhaddq_sv16qi (__a, __b);
4503 }
4504 
4505 __extension__ extern __inline int8x16_t
4506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4507 __arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
4508 {
4509   return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
4510 }
4511 
4512 __extension__ extern __inline int8x16_t
4513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4514 __arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
4515 {
4516   return __builtin_mve_veorq_sv16qi (__a, __b);
4517 }
4518 
4519 __extension__ extern __inline int8x16_t
4520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4521 __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
4522 {
4523   return __builtin_mve_vcaddq_rot90_sv16qi (__a, __b);
4524 }
4525 
4526 __extension__ extern __inline int8x16_t
4527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4528 __arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
4529 {
4530   return __builtin_mve_vcaddq_rot270_sv16qi (__a, __b);
4531 }
4532 
4533 __extension__ extern __inline int8x16_t
4534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4535 __arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b)
4536 {
4537   return __builtin_mve_vbrsrq_n_sv16qi (__a, __b);
4538 }
4539 
4540 __extension__ extern __inline int8x16_t
4541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4542 __arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
4543 {
4544   return __builtin_mve_vbicq_sv16qi (__a, __b);
4545 }
4546 
4547 __extension__ extern __inline int8x16_t
4548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4549 __arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
4550 {
4551   return __builtin_mve_vandq_sv16qi (__a, __b);
4552 }
4553 
4554 __extension__ extern __inline int32_t
4555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4556 __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
4557 {
4558   return __builtin_mve_vaddvaq_sv16qi (__a, __b);
4559 }
4560 
4561 __extension__ extern __inline int8x16_t
4562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4563 __arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
4564 {
4565   return __builtin_mve_vaddq_n_sv16qi (__a, __b);
4566 }
4567 
4568 __extension__ extern __inline int8x16_t
4569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4570 __arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
4571 {
4572   return __builtin_mve_vabdq_sv16qi (__a, __b);
4573 }
4574 
4575 __extension__ extern __inline int8x16_t
4576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4577 __arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
4578 {
4579   return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
4580 }
4581 
4582 __extension__ extern __inline int8x16_t
4583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4584 __arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
4585 {
4586   return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
4587 }
4588 
4589 __extension__ extern __inline int8x16_t
4590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4591 __arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
4592 {
4593   return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
4594 }
4595 
4596 __extension__ extern __inline uint16x8_t
4597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4598 __arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
4599 {
4600   return __builtin_mve_vsubq_uv8hi (__a, __b);
4601 }
4602 
4603 __extension__ extern __inline uint16x8_t
4604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4605 __arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
4606 {
4607   return __builtin_mve_vsubq_n_uv8hi (__a, __b);
4608 }
4609 
4610 __extension__ extern __inline uint16x8_t
4611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4612 __arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
4613 {
4614   return __builtin_mve_vrmulhq_uv8hi (__a, __b);
4615 }
4616 
4617 __extension__ extern __inline uint16x8_t
4618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4619 __arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
4620 {
4621   return __builtin_mve_vrhaddq_uv8hi (__a, __b);
4622 }
4623 
4624 __extension__ extern __inline uint16x8_t
4625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4626 __arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
4627 {
4628   return __builtin_mve_vqsubq_uv8hi (__a, __b);
4629 }
4630 
4631 __extension__ extern __inline uint16x8_t
4632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4633 __arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
4634 {
4635   return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
4636 }
4637 
4638 __extension__ extern __inline uint16x8_t
4639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4640 __arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
4641 {
4642   return __builtin_mve_vqaddq_uv8hi (__a, __b);
4643 }
4644 
4645 __extension__ extern __inline uint16x8_t
4646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4647 __arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
4648 {
4649   return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
4650 }
4651 
4652 __extension__ extern __inline uint16x8_t
4653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4654 __arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
4655 {
4656   return __builtin_mve_vorrq_uv8hi (__a, __b);
4657 }
4658 
4659 __extension__ extern __inline uint16x8_t
4660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4661 __arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
4662 {
4663   return __builtin_mve_vornq_uv8hi (__a, __b);
4664 }
4665 
4666 __extension__ extern __inline uint16x8_t
4667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4668 __arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
4669 {
4670   return __builtin_mve_vmulq_uv8hi (__a, __b);
4671 }
4672 
4673 __extension__ extern __inline uint16x8_t
4674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4675 __arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
4676 {
4677   return __builtin_mve_vmulq_n_uv8hi (__a, __b);
4678 }
4679 
4680 __extension__ extern __inline uint32x4_t
4681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4682 __arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
4683 {
4684   return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
4685 }
4686 
4687 __extension__ extern __inline uint32x4_t
4688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4689 __arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
4690 {
4691   return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
4692 }
4693 
4694 __extension__ extern __inline uint16x8_t
4695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4696 __arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
4697 {
4698   return __builtin_mve_vmulhq_uv8hi (__a, __b);
4699 }
4700 
4701 __extension__ extern __inline uint32_t
4702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4703 __arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
4704 {
4705   return __builtin_mve_vmladavq_uv8hi (__a, __b);
4706 }
4707 
4708 __extension__ extern __inline uint16_t
4709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4710 __arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
4711 {
4712   return __builtin_mve_vminvq_uv8hi (__a, __b);
4713 }
4714 
4715 __extension__ extern __inline uint16x8_t
4716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4717 __arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
4718 {
4719   return __builtin_mve_vminq_uv8hi (__a, __b);
4720 }
4721 
4722 __extension__ extern __inline uint16_t
4723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4724 __arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
4725 {
4726   return __builtin_mve_vmaxvq_uv8hi (__a, __b);
4727 }
4728 
4729 __extension__ extern __inline uint16x8_t
4730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4731 __arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
4732 {
4733   return __builtin_mve_vmaxq_uv8hi (__a, __b);
4734 }
4735 
4736 __extension__ extern __inline uint16x8_t
4737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4738 __arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
4739 {
4740   return __builtin_mve_vhsubq_uv8hi (__a, __b);
4741 }
4742 
4743 __extension__ extern __inline uint16x8_t
4744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4745 __arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
4746 {
4747   return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
4748 }
4749 
4750 __extension__ extern __inline uint16x8_t
4751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4752 __arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
4753 {
4754   return __builtin_mve_vhaddq_uv8hi (__a, __b);
4755 }
4756 
4757 __extension__ extern __inline uint16x8_t
4758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4759 __arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
4760 {
4761   return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
4762 }
4763 
4764 __extension__ extern __inline uint16x8_t
4765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4766 __arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
4767 {
4768   return __builtin_mve_veorq_uv8hi (__a, __b);
4769 }
4770 
4771 __extension__ extern __inline mve_pred16_t
4772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4773 __arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
4774 {
4775   return __builtin_mve_vcmpneq_n_uv8hi (__a, __b);
4776 }
4777 
4778 __extension__ extern __inline mve_pred16_t
4779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4780 __arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b)
4781 {
4782   return __builtin_mve_vcmphiq_uv8hi (__a, __b);
4783 }
4784 
4785 __extension__ extern __inline mve_pred16_t
4786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4787 __arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b)
4788 {
4789   return __builtin_mve_vcmphiq_n_uv8hi (__a, __b);
4790 }
4791 
4792 __extension__ extern __inline mve_pred16_t
4793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4794 __arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b)
4795 {
4796   return __builtin_mve_vcmpeqq_uv8hi (__a, __b);
4797 }
4798 
4799 __extension__ extern __inline mve_pred16_t
4800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4801 __arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b)
4802 {
4803   return __builtin_mve_vcmpeqq_n_uv8hi (__a, __b);
4804 }
4805 
4806 __extension__ extern __inline mve_pred16_t
4807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4808 __arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b)
4809 {
4810   return __builtin_mve_vcmpcsq_uv8hi (__a, __b);
4811 }
4812 
4813 __extension__ extern __inline mve_pred16_t
4814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4815 __arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b)
4816 {
4817   return __builtin_mve_vcmpcsq_n_uv8hi (__a, __b);
4818 }
4819 
4820 __extension__ extern __inline uint16x8_t
4821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4822 __arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
4823 {
4824   return __builtin_mve_vcaddq_rot90_uv8hi (__a, __b);
4825 }
4826 
4827 __extension__ extern __inline uint16x8_t
4828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4829 __arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
4830 {
4831   return __builtin_mve_vcaddq_rot270_uv8hi (__a, __b);
4832 }
4833 
4834 __extension__ extern __inline uint16x8_t
4835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4836 __arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
4837 {
4838   return __builtin_mve_vbicq_uv8hi (__a, __b);
4839 }
4840 
4841 __extension__ extern __inline uint16x8_t
4842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4843 __arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
4844 {
4845   return __builtin_mve_vandq_uv8hi (__a, __b);
4846 }
4847 
4848 __extension__ extern __inline uint32_t
4849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4850 __arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
4851 {
4852   return __builtin_mve_vaddvq_p_uv8hi (__a, __p);
4853 }
4854 
4855 __extension__ extern __inline uint32_t
4856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4857 __arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
4858 {
4859   return __builtin_mve_vaddvaq_uv8hi (__a, __b);
4860 }
4861 
4862 __extension__ extern __inline uint16x8_t
4863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4864 __arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
4865 {
4866   return __builtin_mve_vaddq_n_uv8hi (__a, __b);
4867 }
4868 
4869 __extension__ extern __inline uint16x8_t
4870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4871 __arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
4872 {
4873   return __builtin_mve_vabdq_uv8hi (__a, __b);
4874 }
4875 
4876 __extension__ extern __inline uint16x8_t
4877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4878 __arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
4879 {
4880   return __builtin_mve_vshlq_r_uv8hi (__a, __b);
4881 }
4882 
4883 __extension__ extern __inline uint16x8_t
4884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4885 __arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
4886 {
4887   return __builtin_mve_vrshlq_uv8hi (__a, __b);
4888 }
4889 
4890 __extension__ extern __inline uint16x8_t
4891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4892 __arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
4893 {
4894   return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
4895 }
4896 
4897 __extension__ extern __inline uint16x8_t
4898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4899 __arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
4900 {
4901   return __builtin_mve_vqshlq_uv8hi (__a, __b);
4902 }
4903 
4904 __extension__ extern __inline uint16x8_t
4905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4906 __arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
4907 {
4908   return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
4909 }
4910 
4911 __extension__ extern __inline uint16x8_t
4912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4913 __arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
4914 {
4915   return __builtin_mve_vqrshlq_uv8hi (__a, __b);
4916 }
4917 
4918 __extension__ extern __inline uint16x8_t
4919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4920 __arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
4921 {
4922   return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
4923 }
4924 
4925 __extension__ extern __inline uint16_t
4926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4927 __arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
4928 {
4929   return __builtin_mve_vminavq_sv8hi (__a, __b);
4930 }
4931 
4932 __extension__ extern __inline uint16x8_t
4933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4934 __arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b)
4935 {
4936   return __builtin_mve_vminaq_sv8hi (__a, __b);
4937 }
4938 
4939 __extension__ extern __inline uint16_t
4940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4941 __arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b)
4942 {
4943   return __builtin_mve_vmaxavq_sv8hi (__a, __b);
4944 }
4945 
4946 __extension__ extern __inline uint16x8_t
4947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4948 __arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b)
4949 {
4950   return __builtin_mve_vmaxaq_sv8hi (__a, __b);
4951 }
4952 
4953 __extension__ extern __inline uint16x8_t
4954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4955 __arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b)
4956 {
4957   return __builtin_mve_vbrsrq_n_uv8hi (__a, __b);
4958 }
4959 
4960 __extension__ extern __inline uint16x8_t
4961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4962 __arm_vshlq_n_u16 (uint16x8_t __a, const int __imm)
4963 {
4964   return __builtin_mve_vshlq_n_uv8hi (__a, __imm);
4965 }
4966 
4967 __extension__ extern __inline uint16x8_t
4968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4969 __arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm)
4970 {
4971   return __builtin_mve_vrshrq_n_uv8hi (__a, __imm);
4972 }
4973 
4974 __extension__ extern __inline uint16x8_t
4975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4976 __arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm)
4977 {
4978   return __builtin_mve_vqshlq_n_uv8hi (__a, __imm);
4979 }
4980 
4981 __extension__ extern __inline mve_pred16_t
4982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4983 __arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
4984 {
4985   return __builtin_mve_vcmpneq_n_sv8hi (__a, __b);
4986 }
4987 
4988 __extension__ extern __inline mve_pred16_t
4989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4990 __arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
4991 {
4992   return __builtin_mve_vcmpltq_sv8hi (__a, __b);
4993 }
4994 
4995 __extension__ extern __inline mve_pred16_t
4996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4997 __arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
4998 {
4999   return __builtin_mve_vcmpltq_n_sv8hi (__a, __b);
5000 }
5001 
5002 __extension__ extern __inline mve_pred16_t
5003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5004 __arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
5005 {
5006   return __builtin_mve_vcmpleq_sv8hi (__a, __b);
5007 }
5008 
5009 __extension__ extern __inline mve_pred16_t
5010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5011 __arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
5012 {
5013   return __builtin_mve_vcmpleq_n_sv8hi (__a, __b);
5014 }
5015 
5016 __extension__ extern __inline mve_pred16_t
5017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5018 __arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
5019 {
5020   return __builtin_mve_vcmpgtq_sv8hi (__a, __b);
5021 }
5022 
5023 __extension__ extern __inline mve_pred16_t
5024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5025 __arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
5026 {
5027   return __builtin_mve_vcmpgtq_n_sv8hi (__a, __b);
5028 }
5029 
5030 __extension__ extern __inline mve_pred16_t
5031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5032 __arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
5033 {
5034   return __builtin_mve_vcmpgeq_sv8hi (__a, __b);
5035 }
5036 
5037 __extension__ extern __inline mve_pred16_t
5038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5039 __arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
5040 {
5041   return __builtin_mve_vcmpgeq_n_sv8hi (__a, __b);
5042 }
5043 
5044 __extension__ extern __inline mve_pred16_t
5045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5046 __arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
5047 {
5048   return __builtin_mve_vcmpeqq_sv8hi (__a, __b);
5049 }
5050 
5051 __extension__ extern __inline mve_pred16_t
5052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5053 __arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
5054 {
5055   return __builtin_mve_vcmpeqq_n_sv8hi (__a, __b);
5056 }
5057 
5058 __extension__ extern __inline uint16x8_t
5059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5060 __arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
5061 {
5062   return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
5063 }
5064 
5065 __extension__ extern __inline int32_t
5066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5067 __arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
5068 {
5069   return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
5070 }
5071 
5072 __extension__ extern __inline int16x8_t
5073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5074 __arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
5075 {
5076   return __builtin_mve_vsubq_sv8hi (__a, __b);
5077 }
5078 
5079 __extension__ extern __inline int16x8_t
5080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5081 __arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
5082 {
5083   return __builtin_mve_vsubq_n_sv8hi (__a, __b);
5084 }
5085 
5086 __extension__ extern __inline int16x8_t
5087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5088 __arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
5089 {
5090   return __builtin_mve_vshlq_r_sv8hi (__a, __b);
5091 }
5092 
5093 __extension__ extern __inline int16x8_t
5094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5095 __arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
5096 {
5097   return __builtin_mve_vrshlq_sv8hi (__a, __b);
5098 }
5099 
5100 __extension__ extern __inline int16x8_t
5101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5102 __arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
5103 {
5104   return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
5105 }
5106 
5107 __extension__ extern __inline int16x8_t
5108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5109 __arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
5110 {
5111   return __builtin_mve_vrmulhq_sv8hi (__a, __b);
5112 }
5113 
5114 __extension__ extern __inline int16x8_t
5115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5116 __arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
5117 {
5118   return __builtin_mve_vrhaddq_sv8hi (__a, __b);
5119 }
5120 
5121 __extension__ extern __inline int16x8_t
5122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5123 __arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
5124 {
5125   return __builtin_mve_vqsubq_sv8hi (__a, __b);
5126 }
5127 
5128 __extension__ extern __inline int16x8_t
5129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5130 __arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
5131 {
5132   return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
5133 }
5134 
5135 __extension__ extern __inline int16x8_t
5136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5137 __arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
5138 {
5139   return __builtin_mve_vqshlq_sv8hi (__a, __b);
5140 }
5141 
5142 __extension__ extern __inline int16x8_t
5143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5144 __arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
5145 {
5146   return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
5147 }
5148 
5149 __extension__ extern __inline int16x8_t
5150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5151 __arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
5152 {
5153   return __builtin_mve_vqrshlq_sv8hi (__a, __b);
5154 }
5155 
5156 __extension__ extern __inline int16x8_t
5157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5158 __arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
5159 {
5160   return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
5161 }
5162 
5163 __extension__ extern __inline int16x8_t
5164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5165 __arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
5166 {
5167   return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
5168 }
5169 
5170 __extension__ extern __inline int16x8_t
5171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5172 __arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
5173 {
5174   return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
5175 }
5176 
5177 __extension__ extern __inline int16x8_t
5178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5179 __arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
5180 {
5181   return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
5182 }
5183 
5184 __extension__ extern __inline int16x8_t
5185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5186 __arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
5187 {
5188   return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
5189 }
5190 
5191 __extension__ extern __inline int16x8_t
5192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5193 __arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
5194 {
5195   return __builtin_mve_vqaddq_sv8hi (__a, __b);
5196 }
5197 
5198 __extension__ extern __inline int16x8_t
5199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5200 __arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
5201 {
5202   return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
5203 }
5204 
5205 __extension__ extern __inline int16x8_t
5206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5207 __arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
5208 {
5209   return __builtin_mve_vorrq_sv8hi (__a, __b);
5210 }
5211 
5212 __extension__ extern __inline int16x8_t
5213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5214 __arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
5215 {
5216   return __builtin_mve_vornq_sv8hi (__a, __b);
5217 }
5218 
5219 __extension__ extern __inline int16x8_t
5220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5221 __arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
5222 {
5223   return __builtin_mve_vmulq_sv8hi (__a, __b);
5224 }
5225 
5226 __extension__ extern __inline int16x8_t
5227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5228 __arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
5229 {
5230   return __builtin_mve_vmulq_n_sv8hi (__a, __b);
5231 }
5232 
5233 __extension__ extern __inline int32x4_t
5234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5235 __arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
5236 {
5237   return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
5238 }
5239 
5240 __extension__ extern __inline int32x4_t
5241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5242 __arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
5243 {
5244   return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
5245 }
5246 
5247 __extension__ extern __inline int16x8_t
5248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5249 __arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
5250 {
5251   return __builtin_mve_vmulhq_sv8hi (__a, __b);
5252 }
5253 
5254 __extension__ extern __inline int32_t
5255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5256 __arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
5257 {
5258   return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
5259 }
5260 
5261 __extension__ extern __inline int32_t
5262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5263 __arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
5264 {
5265   return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
5266 }
5267 
5268 __extension__ extern __inline int32_t
5269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5270 __arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
5271 {
5272   return __builtin_mve_vmladavxq_sv8hi (__a, __b);
5273 }
5274 
5275 __extension__ extern __inline int32_t
5276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5277 __arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
5278 {
5279   return __builtin_mve_vmladavq_sv8hi (__a, __b);
5280 }
5281 
5282 __extension__ extern __inline int16_t
5283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5284 __arm_vminvq_s16 (int16_t __a, int16x8_t __b)
5285 {
5286   return __builtin_mve_vminvq_sv8hi (__a, __b);
5287 }
5288 
5289 __extension__ extern __inline int16x8_t
5290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5291 __arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
5292 {
5293   return __builtin_mve_vminq_sv8hi (__a, __b);
5294 }
5295 
5296 __extension__ extern __inline int16_t
5297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5298 __arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
5299 {
5300   return __builtin_mve_vmaxvq_sv8hi (__a, __b);
5301 }
5302 
5303 __extension__ extern __inline int16x8_t
5304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5305 __arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
5306 {
5307   return __builtin_mve_vmaxq_sv8hi (__a, __b);
5308 }
5309 
5310 __extension__ extern __inline int16x8_t
5311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5312 __arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
5313 {
5314   return __builtin_mve_vhsubq_sv8hi (__a, __b);
5315 }
5316 
5317 __extension__ extern __inline int16x8_t
5318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5319 __arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
5320 {
5321   return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
5322 }
5323 
5324 __extension__ extern __inline int16x8_t
5325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5326 __arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
5327 {
5328   return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
5329 }
5330 
5331 __extension__ extern __inline int16x8_t
5332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5333 __arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
5334 {
5335   return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
5336 }
5337 
5338 __extension__ extern __inline int16x8_t
5339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5340 __arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
5341 {
5342   return __builtin_mve_vhaddq_sv8hi (__a, __b);
5343 }
5344 
5345 __extension__ extern __inline int16x8_t
5346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5347 __arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
5348 {
5349   return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
5350 }
5351 
5352 __extension__ extern __inline int16x8_t
5353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5354 __arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
5355 {
5356   return __builtin_mve_veorq_sv8hi (__a, __b);
5357 }
5358 
5359 __extension__ extern __inline int16x8_t
5360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5361 __arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
5362 {
5363   return __builtin_mve_vcaddq_rot90_sv8hi (__a, __b);
5364 }
5365 
5366 __extension__ extern __inline int16x8_t
5367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5368 __arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
5369 {
5370   return __builtin_mve_vcaddq_rot270_sv8hi (__a, __b);
5371 }
5372 
5373 __extension__ extern __inline int16x8_t
5374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5375 __arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
5376 {
5377   return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
5378 }
5379 
5380 __extension__ extern __inline int16x8_t
5381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5382 __arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
5383 {
5384   return __builtin_mve_vbicq_sv8hi (__a, __b);
5385 }
5386 
5387 __extension__ extern __inline int16x8_t
5388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5389 __arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
5390 {
5391   return __builtin_mve_vandq_sv8hi (__a, __b);
5392 }
5393 
5394 __extension__ extern __inline int32_t
5395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5396 __arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
5397 {
5398   return __builtin_mve_vaddvaq_sv8hi (__a, __b);
5399 }
5400 
5401 __extension__ extern __inline int16x8_t
5402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5403 __arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
5404 {
5405   return __builtin_mve_vaddq_n_sv8hi (__a, __b);
5406 }
5407 
5408 __extension__ extern __inline int16x8_t
5409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5410 __arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
5411 {
5412   return __builtin_mve_vabdq_sv8hi (__a, __b);
5413 }
5414 
5415 __extension__ extern __inline int16x8_t
5416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5417 __arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
5418 {
5419   return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
5420 }
5421 
5422 __extension__ extern __inline int16x8_t
5423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5424 __arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
5425 {
5426   return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
5427 }
5428 
5429 __extension__ extern __inline int16x8_t
5430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5431 __arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
5432 {
5433   return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
5434 }
5435 
5436 __extension__ extern __inline uint32x4_t
5437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5438 __arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
5439 {
5440   return __builtin_mve_vsubq_uv4si (__a, __b);
5441 }
5442 
5443 __extension__ extern __inline uint32x4_t
5444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5445 __arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
5446 {
5447   return __builtin_mve_vsubq_n_uv4si (__a, __b);
5448 }
5449 
5450 __extension__ extern __inline uint32x4_t
5451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5452 __arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
5453 {
5454   return __builtin_mve_vrmulhq_uv4si (__a, __b);
5455 }
5456 
5457 __extension__ extern __inline uint32x4_t
5458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5459 __arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
5460 {
5461   return __builtin_mve_vrhaddq_uv4si (__a, __b);
5462 }
5463 
5464 __extension__ extern __inline uint32x4_t
5465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5466 __arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
5467 {
5468   return __builtin_mve_vqsubq_uv4si (__a, __b);
5469 }
5470 
5471 __extension__ extern __inline uint32x4_t
5472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5473 __arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
5474 {
5475   return __builtin_mve_vqsubq_n_uv4si (__a, __b);
5476 }
5477 
5478 __extension__ extern __inline uint32x4_t
5479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5480 __arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
5481 {
5482   return __builtin_mve_vqaddq_uv4si (__a, __b);
5483 }
5484 
5485 __extension__ extern __inline uint32x4_t
5486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5487 __arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
5488 {
5489   return __builtin_mve_vqaddq_n_uv4si (__a, __b);
5490 }
5491 
5492 __extension__ extern __inline uint32x4_t
5493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5494 __arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
5495 {
5496   return __builtin_mve_vorrq_uv4si (__a, __b);
5497 }
5498 
5499 __extension__ extern __inline uint32x4_t
5500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5501 __arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
5502 {
5503   return __builtin_mve_vornq_uv4si (__a, __b);
5504 }
5505 
5506 __extension__ extern __inline uint32x4_t
5507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5508 __arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
5509 {
5510   return __builtin_mve_vmulq_uv4si (__a, __b);
5511 }
5512 
5513 __extension__ extern __inline uint32x4_t
5514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5515 __arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
5516 {
5517   return __builtin_mve_vmulq_n_uv4si (__a, __b);
5518 }
5519 
5520 __extension__ extern __inline uint64x2_t
5521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5522 __arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
5523 {
5524   return __builtin_mve_vmulltq_int_uv4si (__a, __b);
5525 }
5526 
5527 __extension__ extern __inline uint64x2_t
5528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5529 __arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
5530 {
5531   return __builtin_mve_vmullbq_int_uv4si (__a, __b);
5532 }
5533 
5534 __extension__ extern __inline uint32x4_t
5535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5536 __arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
5537 {
5538   return __builtin_mve_vmulhq_uv4si (__a, __b);
5539 }
5540 
5541 __extension__ extern __inline uint32_t
5542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5543 __arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
5544 {
5545   return __builtin_mve_vmladavq_uv4si (__a, __b);
5546 }
5547 
5548 __extension__ extern __inline uint32_t
5549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5550 __arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
5551 {
5552   return __builtin_mve_vminvq_uv4si (__a, __b);
5553 }
5554 
5555 __extension__ extern __inline uint32x4_t
5556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5557 __arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
5558 {
5559   return __builtin_mve_vminq_uv4si (__a, __b);
5560 }
5561 
5562 __extension__ extern __inline uint32_t
5563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5564 __arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
5565 {
5566   return __builtin_mve_vmaxvq_uv4si (__a, __b);
5567 }
5568 
5569 __extension__ extern __inline uint32x4_t
5570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5571 __arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
5572 {
5573   return __builtin_mve_vmaxq_uv4si (__a, __b);
5574 }
5575 
5576 __extension__ extern __inline uint32x4_t
5577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5578 __arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
5579 {
5580   return __builtin_mve_vhsubq_uv4si (__a, __b);
5581 }
5582 
5583 __extension__ extern __inline uint32x4_t
5584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5585 __arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
5586 {
5587   return __builtin_mve_vhsubq_n_uv4si (__a, __b);
5588 }
5589 
5590 __extension__ extern __inline uint32x4_t
5591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5592 __arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
5593 {
5594   return __builtin_mve_vhaddq_uv4si (__a, __b);
5595 }
5596 
5597 __extension__ extern __inline uint32x4_t
5598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5599 __arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
5600 {
5601   return __builtin_mve_vhaddq_n_uv4si (__a, __b);
5602 }
5603 
5604 __extension__ extern __inline uint32x4_t
5605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5606 __arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
5607 {
5608   return __builtin_mve_veorq_uv4si (__a, __b);
5609 }
5610 
5611 __extension__ extern __inline mve_pred16_t
5612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5613 __arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
5614 {
5615   return __builtin_mve_vcmpneq_n_uv4si (__a, __b);
5616 }
5617 
5618 __extension__ extern __inline mve_pred16_t
5619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5620 __arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
5621 {
5622   return __builtin_mve_vcmphiq_uv4si (__a, __b);
5623 }
5624 
5625 __extension__ extern __inline mve_pred16_t
5626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5627 __arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
5628 {
5629   return __builtin_mve_vcmphiq_n_uv4si (__a, __b);
5630 }
5631 
5632 __extension__ extern __inline mve_pred16_t
5633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5634 __arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
5635 {
5636   return __builtin_mve_vcmpeqq_uv4si (__a, __b);
5637 }
5638 
5639 __extension__ extern __inline mve_pred16_t
5640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5641 __arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
5642 {
5643   return __builtin_mve_vcmpeqq_n_uv4si (__a, __b);
5644 }
5645 
5646 __extension__ extern __inline mve_pred16_t
5647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5648 __arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
5649 {
5650   return __builtin_mve_vcmpcsq_uv4si (__a, __b);
5651 }
5652 
5653 __extension__ extern __inline mve_pred16_t
5654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5655 __arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
5656 {
5657   return __builtin_mve_vcmpcsq_n_uv4si (__a, __b);
5658 }
5659 
5660 __extension__ extern __inline uint32x4_t
5661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5662 __arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
5663 {
5664   return __builtin_mve_vcaddq_rot90_uv4si (__a, __b);
5665 }
5666 
5667 __extension__ extern __inline uint32x4_t
5668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5669 __arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
5670 {
5671   return __builtin_mve_vcaddq_rot270_uv4si (__a, __b);
5672 }
5673 
5674 __extension__ extern __inline uint32x4_t
5675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5676 __arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
5677 {
5678   return __builtin_mve_vbicq_uv4si (__a, __b);
5679 }
5680 
5681 __extension__ extern __inline uint32x4_t
5682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5683 __arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
5684 {
5685   return __builtin_mve_vandq_uv4si (__a, __b);
5686 }
5687 
5688 __extension__ extern __inline uint32_t
5689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5690 __arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
5691 {
5692   return __builtin_mve_vaddvq_p_uv4si (__a, __p);
5693 }
5694 
5695 __extension__ extern __inline uint32_t
5696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5697 __arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
5698 {
5699   return __builtin_mve_vaddvaq_uv4si (__a, __b);
5700 }
5701 
5702 __extension__ extern __inline uint32x4_t
5703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5704 __arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
5705 {
5706   return __builtin_mve_vaddq_n_uv4si (__a, __b);
5707 }
5708 
5709 __extension__ extern __inline uint32x4_t
5710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5711 __arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
5712 {
5713   return __builtin_mve_vabdq_uv4si (__a, __b);
5714 }
5715 
5716 __extension__ extern __inline uint32x4_t
5717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5718 __arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
5719 {
5720   return __builtin_mve_vshlq_r_uv4si (__a, __b);
5721 }
5722 
5723 __extension__ extern __inline uint32x4_t
5724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5725 __arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
5726 {
5727   return __builtin_mve_vrshlq_uv4si (__a, __b);
5728 }
5729 
5730 __extension__ extern __inline uint32x4_t
5731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5732 __arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
5733 {
5734   return __builtin_mve_vrshlq_n_uv4si (__a, __b);
5735 }
5736 
5737 __extension__ extern __inline uint32x4_t
5738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5739 __arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
5740 {
5741   return __builtin_mve_vqshlq_uv4si (__a, __b);
5742 }
5743 
5744 __extension__ extern __inline uint32x4_t
5745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5746 __arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
5747 {
5748   return __builtin_mve_vqshlq_r_uv4si (__a, __b);
5749 }
5750 
5751 __extension__ extern __inline uint32x4_t
5752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5753 __arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
5754 {
5755   return __builtin_mve_vqrshlq_uv4si (__a, __b);
5756 }
5757 
5758 __extension__ extern __inline uint32x4_t
5759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5760 __arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
5761 {
5762   return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
5763 }
5764 
5765 __extension__ extern __inline uint32_t
5766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5767 __arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
5768 {
5769   return __builtin_mve_vminavq_sv4si (__a, __b);
5770 }
5771 
5772 __extension__ extern __inline uint32x4_t
5773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5774 __arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
5775 {
5776   return __builtin_mve_vminaq_sv4si (__a, __b);
5777 }
5778 
5779 __extension__ extern __inline uint32_t
5780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5781 __arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
5782 {
5783   return __builtin_mve_vmaxavq_sv4si (__a, __b);
5784 }
5785 
5786 __extension__ extern __inline uint32x4_t
5787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5788 __arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
5789 {
5790   return __builtin_mve_vmaxaq_sv4si (__a, __b);
5791 }
5792 
5793 __extension__ extern __inline uint32x4_t
5794 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5795 __arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
5796 {
5797   return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
5798 }
5799 
5800 __extension__ extern __inline uint32x4_t
5801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5802 __arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
5803 {
5804   return __builtin_mve_vshlq_n_uv4si (__a, __imm);
5805 }
5806 
5807 __extension__ extern __inline uint32x4_t
5808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5809 __arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
5810 {
5811   return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
5812 }
5813 
5814 __extension__ extern __inline uint32x4_t
5815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5816 __arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
5817 {
5818   return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
5819 }
5820 
5821 __extension__ extern __inline mve_pred16_t
5822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5823 __arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
5824 {
5825   return __builtin_mve_vcmpneq_n_sv4si (__a, __b);
5826 }
5827 
5828 __extension__ extern __inline mve_pred16_t
5829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5830 __arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
5831 {
5832   return __builtin_mve_vcmpltq_sv4si (__a, __b);
5833 }
5834 
5835 __extension__ extern __inline mve_pred16_t
5836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5837 __arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
5838 {
5839   return __builtin_mve_vcmpltq_n_sv4si (__a, __b);
5840 }
5841 
5842 __extension__ extern __inline mve_pred16_t
5843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5844 __arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
5845 {
5846   return __builtin_mve_vcmpleq_sv4si (__a, __b);
5847 }
5848 
5849 __extension__ extern __inline mve_pred16_t
5850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5851 __arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
5852 {
5853   return __builtin_mve_vcmpleq_n_sv4si (__a, __b);
5854 }
5855 
5856 __extension__ extern __inline mve_pred16_t
5857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5858 __arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
5859 {
5860   return __builtin_mve_vcmpgtq_sv4si (__a, __b);
5861 }
5862 
5863 __extension__ extern __inline mve_pred16_t
5864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5865 __arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
5866 {
5867   return __builtin_mve_vcmpgtq_n_sv4si (__a, __b);
5868 }
5869 
5870 __extension__ extern __inline mve_pred16_t
5871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5872 __arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
5873 {
5874   return __builtin_mve_vcmpgeq_sv4si (__a, __b);
5875 }
5876 
5877 __extension__ extern __inline mve_pred16_t
5878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5879 __arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
5880 {
5881   return __builtin_mve_vcmpgeq_n_sv4si (__a, __b);
5882 }
5883 
5884 __extension__ extern __inline mve_pred16_t
5885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5886 __arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
5887 {
5888   return __builtin_mve_vcmpeqq_sv4si (__a, __b);
5889 }
5890 
5891 __extension__ extern __inline mve_pred16_t
5892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5893 __arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
5894 {
5895   return __builtin_mve_vcmpeqq_n_sv4si (__a, __b);
5896 }
5897 
5898 __extension__ extern __inline uint32x4_t
5899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5900 __arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
5901 {
5902   return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
5903 }
5904 
5905 __extension__ extern __inline int32_t
5906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5907 __arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
5908 {
5909   return __builtin_mve_vaddvq_p_sv4si (__a, __p);
5910 }
5911 
5912 __extension__ extern __inline int32x4_t
5913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5914 __arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
5915 {
5916   return __builtin_mve_vsubq_sv4si (__a, __b);
5917 }
5918 
5919 __extension__ extern __inline int32x4_t
5920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5921 __arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
5922 {
5923   return __builtin_mve_vsubq_n_sv4si (__a, __b);
5924 }
5925 
5926 __extension__ extern __inline int32x4_t
5927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5928 __arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
5929 {
5930   return __builtin_mve_vshlq_r_sv4si (__a, __b);
5931 }
5932 
5933 __extension__ extern __inline int32x4_t
5934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5935 __arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
5936 {
5937   return __builtin_mve_vrshlq_sv4si (__a, __b);
5938 }
5939 
5940 __extension__ extern __inline int32x4_t
5941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5942 __arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
5943 {
5944   return __builtin_mve_vrshlq_n_sv4si (__a, __b);
5945 }
5946 
5947 __extension__ extern __inline int32x4_t
5948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5949 __arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
5950 {
5951   return __builtin_mve_vrmulhq_sv4si (__a, __b);
5952 }
5953 
5954 __extension__ extern __inline int32x4_t
5955 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5956 __arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
5957 {
5958   return __builtin_mve_vrhaddq_sv4si (__a, __b);
5959 }
5960 
5961 __extension__ extern __inline int32x4_t
5962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5963 __arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
5964 {
5965   return __builtin_mve_vqsubq_sv4si (__a, __b);
5966 }
5967 
5968 __extension__ extern __inline int32x4_t
5969 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5970 __arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
5971 {
5972   return __builtin_mve_vqsubq_n_sv4si (__a, __b);
5973 }
5974 
5975 __extension__ extern __inline int32x4_t
5976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5977 __arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
5978 {
5979   return __builtin_mve_vqshlq_sv4si (__a, __b);
5980 }
5981 
5982 __extension__ extern __inline int32x4_t
5983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5984 __arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
5985 {
5986   return __builtin_mve_vqshlq_r_sv4si (__a, __b);
5987 }
5988 
5989 __extension__ extern __inline int32x4_t
5990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5991 __arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
5992 {
5993   return __builtin_mve_vqrshlq_sv4si (__a, __b);
5994 }
5995 
5996 __extension__ extern __inline int32x4_t
5997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5998 __arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
5999 {
6000   return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
6001 }
6002 
6003 __extension__ extern __inline int32x4_t
6004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6005 __arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
6006 {
6007   return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
6008 }
6009 
6010 __extension__ extern __inline int32x4_t
6011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6012 __arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
6013 {
6014   return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
6015 }
6016 
6017 __extension__ extern __inline int32x4_t
6018 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6019 __arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
6020 {
6021   return __builtin_mve_vqdmulhq_sv4si (__a, __b);
6022 }
6023 
6024 __extension__ extern __inline int32x4_t
6025 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6026 __arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
6027 {
6028   return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
6029 }
6030 
6031 __extension__ extern __inline int32x4_t
6032 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6033 __arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
6034 {
6035   return __builtin_mve_vqaddq_sv4si (__a, __b);
6036 }
6037 
6038 __extension__ extern __inline int32x4_t
6039 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6040 __arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
6041 {
6042   return __builtin_mve_vqaddq_n_sv4si (__a, __b);
6043 }
6044 
6045 __extension__ extern __inline int32x4_t
6046 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6047 __arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
6048 {
6049   return __builtin_mve_vorrq_sv4si (__a, __b);
6050 }
6051 
6052 __extension__ extern __inline int32x4_t
6053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6054 __arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
6055 {
6056   return __builtin_mve_vornq_sv4si (__a, __b);
6057 }
6058 
6059 __extension__ extern __inline int32x4_t
6060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6061 __arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
6062 {
6063   return __builtin_mve_vmulq_sv4si (__a, __b);
6064 }
6065 
6066 __extension__ extern __inline int32x4_t
6067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6068 __arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
6069 {
6070   return __builtin_mve_vmulq_n_sv4si (__a, __b);
6071 }
6072 
6073 __extension__ extern __inline int64x2_t
6074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6075 __arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
6076 {
6077   return __builtin_mve_vmulltq_int_sv4si (__a, __b);
6078 }
6079 
6080 __extension__ extern __inline int64x2_t
6081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6082 __arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
6083 {
6084   return __builtin_mve_vmullbq_int_sv4si (__a, __b);
6085 }
6086 
6087 __extension__ extern __inline int32x4_t
6088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6089 __arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
6090 {
6091   return __builtin_mve_vmulhq_sv4si (__a, __b);
6092 }
6093 
6094 __extension__ extern __inline int32_t
6095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6096 __arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
6097 {
6098   return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
6099 }
6100 
6101 __extension__ extern __inline int32_t
6102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6103 __arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
6104 {
6105   return __builtin_mve_vmlsdavq_sv4si (__a, __b);
6106 }
6107 
6108 __extension__ extern __inline int32_t
6109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6110 __arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
6111 {
6112   return __builtin_mve_vmladavxq_sv4si (__a, __b);
6113 }
6114 
6115 __extension__ extern __inline int32_t
6116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6117 __arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
6118 {
6119   return __builtin_mve_vmladavq_sv4si (__a, __b);
6120 }
6121 
6122 __extension__ extern __inline int32_t
6123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6124 __arm_vminvq_s32 (int32_t __a, int32x4_t __b)
6125 {
6126   return __builtin_mve_vminvq_sv4si (__a, __b);
6127 }
6128 
6129 __extension__ extern __inline int32x4_t
6130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6131 __arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
6132 {
6133   return __builtin_mve_vminq_sv4si (__a, __b);
6134 }
6135 
6136 __extension__ extern __inline int32_t
6137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6138 __arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
6139 {
6140   return __builtin_mve_vmaxvq_sv4si (__a, __b);
6141 }
6142 
6143 __extension__ extern __inline int32x4_t
6144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6145 __arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
6146 {
6147   return __builtin_mve_vmaxq_sv4si (__a, __b);
6148 }
6149 
6150 __extension__ extern __inline int32x4_t
6151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6152 __arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
6153 {
6154   return __builtin_mve_vhsubq_sv4si (__a, __b);
6155 }
6156 
6157 __extension__ extern __inline int32x4_t
6158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6159 __arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
6160 {
6161   return __builtin_mve_vhsubq_n_sv4si (__a, __b);
6162 }
6163 
6164 __extension__ extern __inline int32x4_t
6165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6166 __arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
6167 {
6168   return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
6169 }
6170 
6171 __extension__ extern __inline int32x4_t
6172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6173 __arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
6174 {
6175   return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
6176 }
6177 
6178 __extension__ extern __inline int32x4_t
6179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6180 __arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b)
6181 {
6182   return __builtin_mve_vhaddq_sv4si (__a, __b);
6183 }
6184 
6185 __extension__ extern __inline int32x4_t
6186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6187 __arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
6188 {
6189   return __builtin_mve_vhaddq_n_sv4si (__a, __b);
6190 }
6191 
6192 __extension__ extern __inline int32x4_t
6193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6194 __arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
6195 {
6196   return __builtin_mve_veorq_sv4si (__a, __b);
6197 }
6198 
6199 __extension__ extern __inline int32x4_t
6200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6201 __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
6202 {
6203   return __builtin_mve_vcaddq_rot90_sv4si (__a, __b);
6204 }
6205 
6206 __extension__ extern __inline int32x4_t
6207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6208 __arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
6209 {
6210   return __builtin_mve_vcaddq_rot270_sv4si (__a, __b);
6211 }
6212 
6213 __extension__ extern __inline int32x4_t
6214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6215 __arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b)
6216 {
6217   return __builtin_mve_vbrsrq_n_sv4si (__a, __b);
6218 }
6219 
6220 __extension__ extern __inline int32x4_t
6221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6222 __arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
6223 {
6224   return __builtin_mve_vbicq_sv4si (__a, __b);
6225 }
6226 
6227 __extension__ extern __inline int32x4_t
6228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6229 __arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
6230 {
6231   return __builtin_mve_vandq_sv4si (__a, __b);
6232 }
6233 
6234 __extension__ extern __inline int32_t
6235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6236 __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
6237 {
6238   return __builtin_mve_vaddvaq_sv4si (__a, __b);
6239 }
6240 
6241 __extension__ extern __inline int32x4_t
6242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6243 __arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
6244 {
6245   return __builtin_mve_vaddq_n_sv4si (__a, __b);
6246 }
6247 
6248 __extension__ extern __inline int32x4_t
6249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6250 __arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
6251 {
6252   return __builtin_mve_vabdq_sv4si (__a, __b);
6253 }
6254 
6255 __extension__ extern __inline int32x4_t
6256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6257 __arm_vshlq_n_s32 (int32x4_t __a, const int __imm)
6258 {
6259   return __builtin_mve_vshlq_n_sv4si (__a, __imm);
6260 }
6261 
6262 __extension__ extern __inline int32x4_t
6263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6264 __arm_vrshrq_n_s32 (int32x4_t __a, const int __imm)
6265 {
6266   return __builtin_mve_vrshrq_n_sv4si (__a, __imm);
6267 }
6268 
6269 __extension__ extern __inline int32x4_t
6270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6271 __arm_vqshlq_n_s32 (int32x4_t __a, const int __imm)
6272 {
6273   return __builtin_mve_vqshlq_n_sv4si (__a, __imm);
6274 }
6275 
6276 __extension__ extern __inline uint8x16_t
6277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6278 __arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
6279 {
6280   return __builtin_mve_vqmovntq_uv8hi (__a, __b);
6281 }
6282 
6283 __extension__ extern __inline uint8x16_t
6284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6285 __arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
6286 {
6287   return __builtin_mve_vqmovnbq_uv8hi (__a, __b);
6288 }
6289 
6290 __extension__ extern __inline uint16x8_t
6291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6292 __arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
6293 {
6294   return __builtin_mve_vmulltq_poly_pv16qi (__a, __b);
6295 }
6296 
6297 __extension__ extern __inline uint16x8_t
6298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6299 __arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
6300 {
6301   return __builtin_mve_vmullbq_poly_pv16qi (__a, __b);
6302 }
6303 
6304 __extension__ extern __inline uint8x16_t
6305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6306 __arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
6307 {
6308   return __builtin_mve_vmovntq_uv8hi (__a, __b);
6309 }
6310 
6311 __extension__ extern __inline uint8x16_t
6312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6313 __arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
6314 {
6315   return __builtin_mve_vmovnbq_uv8hi (__a, __b);
6316 }
6317 
6318 __extension__ extern __inline uint64_t
6319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6320 __arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b)
6321 {
6322   return __builtin_mve_vmlaldavq_uv8hi (__a, __b);
6323 }
6324 
6325 __extension__ extern __inline uint8x16_t
6326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6327 __arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b)
6328 {
6329   return __builtin_mve_vqmovuntq_sv8hi (__a, __b);
6330 }
6331 
6332 __extension__ extern __inline uint8x16_t
6333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6334 __arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b)
6335 {
6336   return __builtin_mve_vqmovunbq_sv8hi (__a, __b);
6337 }
6338 
6339 __extension__ extern __inline uint16x8_t
6340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6341 __arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm)
6342 {
6343   return __builtin_mve_vshlltq_n_uv16qi (__a, __imm);
6344 }
6345 
6346 __extension__ extern __inline uint16x8_t
6347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6348 __arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm)
6349 {
6350   return __builtin_mve_vshllbq_n_uv16qi (__a, __imm);
6351 }
6352 
6353 __extension__ extern __inline uint16x8_t
6354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6355 __arm_vorrq_n_u16 (uint16x8_t __a, const int __imm)
6356 {
6357   return __builtin_mve_vorrq_n_uv8hi (__a, __imm);
6358 }
6359 
6360 __extension__ extern __inline uint16x8_t
6361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6362 __arm_vbicq_n_u16 (uint16x8_t __a, const int __imm)
6363 {
6364   return __builtin_mve_vbicq_n_uv8hi (__a, __imm);
6365 }
6366 
6367 __extension__ extern __inline int8x16_t
6368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6369 __arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b)
6370 {
6371   return __builtin_mve_vqmovntq_sv8hi (__a, __b);
6372 }
6373 
6374 __extension__ extern __inline int8x16_t
6375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6376 __arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b)
6377 {
6378   return __builtin_mve_vqmovnbq_sv8hi (__a, __b);
6379 }
6380 
6381 __extension__ extern __inline int32x4_t
6382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6383 __arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b)
6384 {
6385   return __builtin_mve_vqdmulltq_sv8hi (__a, __b);
6386 }
6387 
6388 __extension__ extern __inline int32x4_t
6389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6390 __arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b)
6391 {
6392   return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b);
6393 }
6394 
6395 __extension__ extern __inline int32x4_t
6396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6397 __arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b)
6398 {
6399   return __builtin_mve_vqdmullbq_sv8hi (__a, __b);
6400 }
6401 
6402 __extension__ extern __inline int32x4_t
6403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6404 __arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b)
6405 {
6406   return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b);
6407 }
6408 
6409 __extension__ extern __inline int8x16_t
6410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6411 __arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b)
6412 {
6413   return __builtin_mve_vmovntq_sv8hi (__a, __b);
6414 }
6415 
6416 __extension__ extern __inline int8x16_t
6417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6418 __arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b)
6419 {
6420   return __builtin_mve_vmovnbq_sv8hi (__a, __b);
6421 }
6422 
6423 __extension__ extern __inline int64_t
6424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6425 __arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b)
6426 {
6427   return __builtin_mve_vmlsldavxq_sv8hi (__a, __b);
6428 }
6429 
6430 __extension__ extern __inline int64_t
6431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6432 __arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b)
6433 {
6434   return __builtin_mve_vmlsldavq_sv8hi (__a, __b);
6435 }
6436 
6437 __extension__ extern __inline int64_t
6438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6439 __arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b)
6440 {
6441   return __builtin_mve_vmlaldavxq_sv8hi (__a, __b);
6442 }
6443 
6444 __extension__ extern __inline int64_t
6445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6446 __arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b)
6447 {
6448   return __builtin_mve_vmlaldavq_sv8hi (__a, __b);
6449 }
6450 
6451 __extension__ extern __inline int16x8_t
6452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6453 __arm_vshlltq_n_s8 (int8x16_t __a, const int __imm)
6454 {
6455   return __builtin_mve_vshlltq_n_sv16qi (__a, __imm);
6456 }
6457 
6458 __extension__ extern __inline int16x8_t
6459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6460 __arm_vshllbq_n_s8 (int8x16_t __a, const int __imm)
6461 {
6462   return __builtin_mve_vshllbq_n_sv16qi (__a, __imm);
6463 }
6464 
6465 __extension__ extern __inline int16x8_t
6466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6467 __arm_vorrq_n_s16 (int16x8_t __a, const int __imm)
6468 {
6469   return __builtin_mve_vorrq_n_sv8hi (__a, __imm);
6470 }
6471 
6472 __extension__ extern __inline int16x8_t
6473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6474 __arm_vbicq_n_s16 (int16x8_t __a, const int __imm)
6475 {
6476   return __builtin_mve_vbicq_n_sv8hi (__a, __imm);
6477 }
6478 
6479 __extension__ extern __inline uint16x8_t
6480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6481 __arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
6482 {
6483   return __builtin_mve_vqmovntq_uv4si (__a, __b);
6484 }
6485 
6486 __extension__ extern __inline uint16x8_t
6487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6488 __arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
6489 {
6490   return __builtin_mve_vqmovnbq_uv4si (__a, __b);
6491 }
6492 
6493 __extension__ extern __inline uint32x4_t
6494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6495 __arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
6496 {
6497   return __builtin_mve_vmulltq_poly_pv8hi (__a, __b);
6498 }
6499 
6500 __extension__ extern __inline uint32x4_t
6501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6502 __arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
6503 {
6504   return __builtin_mve_vmullbq_poly_pv8hi (__a, __b);
6505 }
6506 
6507 __extension__ extern __inline uint16x8_t
6508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6509 __arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
6510 {
6511   return __builtin_mve_vmovntq_uv4si (__a, __b);
6512 }
6513 
6514 __extension__ extern __inline uint16x8_t
6515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6516 __arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
6517 {
6518   return __builtin_mve_vmovnbq_uv4si (__a, __b);
6519 }
6520 
6521 __extension__ extern __inline uint64_t
6522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6523 __arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b)
6524 {
6525   return __builtin_mve_vmlaldavq_uv4si (__a, __b);
6526 }
6527 
6528 __extension__ extern __inline uint16x8_t
6529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6530 __arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b)
6531 {
6532   return __builtin_mve_vqmovuntq_sv4si (__a, __b);
6533 }
6534 
6535 __extension__ extern __inline uint16x8_t
6536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6537 __arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b)
6538 {
6539   return __builtin_mve_vqmovunbq_sv4si (__a, __b);
6540 }
6541 
6542 __extension__ extern __inline uint32x4_t
6543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6544 __arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm)
6545 {
6546   return __builtin_mve_vshlltq_n_uv8hi (__a, __imm);
6547 }
6548 
6549 __extension__ extern __inline uint32x4_t
6550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6551 __arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm)
6552 {
6553   return __builtin_mve_vshllbq_n_uv8hi (__a, __imm);
6554 }
6555 
6556 __extension__ extern __inline uint32x4_t
6557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6558 __arm_vorrq_n_u32 (uint32x4_t __a, const int __imm)
6559 {
6560   return __builtin_mve_vorrq_n_uv4si (__a, __imm);
6561 }
6562 
6563 __extension__ extern __inline uint32x4_t
6564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6565 __arm_vbicq_n_u32 (uint32x4_t __a, const int __imm)
6566 {
6567   return __builtin_mve_vbicq_n_uv4si (__a, __imm);
6568 }
6569 
6570 __extension__ extern __inline int16x8_t
6571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6572 __arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b)
6573 {
6574   return __builtin_mve_vqmovntq_sv4si (__a, __b);
6575 }
6576 
6577 __extension__ extern __inline int16x8_t
6578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6579 __arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b)
6580 {
6581   return __builtin_mve_vqmovnbq_sv4si (__a, __b);
6582 }
6583 
6584 __extension__ extern __inline int64x2_t
6585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6586 __arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b)
6587 {
6588   return __builtin_mve_vqdmulltq_sv4si (__a, __b);
6589 }
6590 
6591 __extension__ extern __inline int64x2_t
6592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6593 __arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b)
6594 {
6595   return __builtin_mve_vqdmulltq_n_sv4si (__a, __b);
6596 }
6597 
6598 __extension__ extern __inline int64x2_t
6599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6600 __arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b)
6601 {
6602   return __builtin_mve_vqdmullbq_sv4si (__a, __b);
6603 }
6604 
6605 __extension__ extern __inline int64x2_t
6606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6607 __arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b)
6608 {
6609   return __builtin_mve_vqdmullbq_n_sv4si (__a, __b);
6610 }
6611 
6612 __extension__ extern __inline int16x8_t
6613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6614 __arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b)
6615 {
6616   return __builtin_mve_vmovntq_sv4si (__a, __b);
6617 }
6618 
6619 __extension__ extern __inline int16x8_t
6620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6621 __arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b)
6622 {
6623   return __builtin_mve_vmovnbq_sv4si (__a, __b);
6624 }
6625 
6626 __extension__ extern __inline int64_t
6627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6628 __arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b)
6629 {
6630   return __builtin_mve_vmlsldavxq_sv4si (__a, __b);
6631 }
6632 
6633 __extension__ extern __inline int64_t
6634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6635 __arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b)
6636 {
6637   return __builtin_mve_vmlsldavq_sv4si (__a, __b);
6638 }
6639 
6640 __extension__ extern __inline int64_t
6641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6642 __arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b)
6643 {
6644   return __builtin_mve_vmlaldavxq_sv4si (__a, __b);
6645 }
6646 
6647 __extension__ extern __inline int64_t
6648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6649 __arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b)
6650 {
6651   return __builtin_mve_vmlaldavq_sv4si (__a, __b);
6652 }
6653 
6654 __extension__ extern __inline int32x4_t
6655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6656 __arm_vshlltq_n_s16 (int16x8_t __a, const int __imm)
6657 {
6658   return __builtin_mve_vshlltq_n_sv8hi (__a, __imm);
6659 }
6660 
6661 __extension__ extern __inline int32x4_t
6662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6663 __arm_vshllbq_n_s16 (int16x8_t __a, const int __imm)
6664 {
6665   return __builtin_mve_vshllbq_n_sv8hi (__a, __imm);
6666 }
6667 
6668 __extension__ extern __inline int32x4_t
6669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6670 __arm_vorrq_n_s32 (int32x4_t __a, const int __imm)
6671 {
6672   return __builtin_mve_vorrq_n_sv4si (__a, __imm);
6673 }
6674 
6675 __extension__ extern __inline int32x4_t
6676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6677 __arm_vbicq_n_s32 (int32x4_t __a, const int __imm)
6678 {
6679   return __builtin_mve_vbicq_n_sv4si (__a, __imm);
6680 }
6681 
6682 __extension__ extern __inline uint64_t
6683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6684 __arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b)
6685 {
6686   return __builtin_mve_vrmlaldavhq_uv4si (__a, __b);
6687 }
6688 
6689 __extension__ extern __inline mve_pred16_t
6690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6691 __arm_vctp8q_m (uint32_t __a, mve_pred16_t __p)
6692 {
6693   return __builtin_mve_vctp8q_mhi (__a, __p);
6694 }
6695 
6696 __extension__ extern __inline mve_pred16_t
6697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6698 __arm_vctp64q_m (uint32_t __a, mve_pred16_t __p)
6699 {
6700   return __builtin_mve_vctp64q_mhi (__a, __p);
6701 }
6702 
6703 __extension__ extern __inline mve_pred16_t
6704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6705 __arm_vctp32q_m (uint32_t __a, mve_pred16_t __p)
6706 {
6707   return __builtin_mve_vctp32q_mhi (__a, __p);
6708 }
6709 
6710 __extension__ extern __inline mve_pred16_t
6711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6712 __arm_vctp16q_m (uint32_t __a, mve_pred16_t __p)
6713 {
6714   return __builtin_mve_vctp16q_mhi (__a, __p);
6715 }
6716 
6717 __extension__ extern __inline uint64_t
6718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6719 __arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b)
6720 {
6721   return __builtin_mve_vaddlvaq_uv4si (__a, __b);
6722 }
6723 
6724 __extension__ extern __inline int64_t
6725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6726 __arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b)
6727 {
6728   return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b);
6729 }
6730 
6731 __extension__ extern __inline int64_t
6732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6733 __arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b)
6734 {
6735   return __builtin_mve_vrmlsldavhq_sv4si (__a, __b);
6736 }
6737 
6738 __extension__ extern __inline int64_t
6739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6740 __arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b)
6741 {
6742   return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b);
6743 }
6744 
6745 __extension__ extern __inline int64_t
6746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6747 __arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b)
6748 {
6749   return __builtin_mve_vrmlaldavhq_sv4si (__a, __b);
6750 }
6751 
6752 __extension__ extern __inline int64_t
6753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6754 __arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b)
6755 {
6756   return __builtin_mve_vaddlvaq_sv4si (__a, __b);
6757 }
6758 
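/* The vabavq intrinsics below accumulate into the scalar __a the absolute
   differences of the corresponding elements of __b and __c.  */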
6759 __extension__ extern __inline uint32_t
6760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6761 __arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c)
6762 {
6763   return __builtin_mve_vabavq_sv16qi (__a, __b, __c);
6764 }
6765 
6766 __extension__ extern __inline uint32_t
6767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6768 __arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c)
6769 {
6770   return __builtin_mve_vabavq_sv8hi (__a, __b, __c);
6771 }
6772 
6773 __extension__ extern __inline uint32_t
6774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6775 __arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c)
6776 {
6777   return __builtin_mve_vabavq_sv4si (__a, __b, __c);
6778 }
6779 
6780 __extension__ extern __inline uint32_t
6781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6782 __arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
6783 {
6784   return __builtin_mve_vabavq_uv16qi(__a, __b, __c);
6785 }
6786 
6787 __extension__ extern __inline uint32_t
6788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6789 __arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
6790 {
6791   return __builtin_mve_vabavq_uv8hi(__a, __b, __c);
6792 }
6793 
6794 __extension__ extern __inline uint32_t
6795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6796 __arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
6797 {
6798   return __builtin_mve_vabavq_uv4si(__a, __b, __c);
6799 }
6800 
6801 __extension__ extern __inline int16x8_t
6802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6803 __arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
6804 {
6805   return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p);
6806 }
6807 
6808 __extension__ extern __inline int32x4_t
6809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6810 __arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
6811 {
6812   return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p);
6813 }
6814 
6815 __extension__ extern __inline uint16x8_t
6816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6817 __arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
6818 {
6819   return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p);
6820 }
6821 
6822 __extension__ extern __inline uint32x4_t
6823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6824 __arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
6825 {
6826   return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p);
6827 }
6828 
6829 __extension__ extern __inline int8x16_t
6830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6831 __arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
6832 {
6833   return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm);
6834 }
6835 
6836 __extension__ extern __inline uint8x16_t
6837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6838 __arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
6839 {
6840   return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm);
6841 }
6842 
6843 __extension__ extern __inline int16x8_t
6844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6845 __arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
6846 {
6847   return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm);
6848 }
6849 
6850 __extension__ extern __inline uint16x8_t
6851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6852 __arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
6853 {
6854   return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm);
6855 }
6856 
6857 __extension__ extern __inline uint8x16_t
6858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6859 __arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
6860 {
6861   return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm);
6862 }
6863 
6864 __extension__ extern __inline uint16x8_t
6865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6866 __arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
6867 {
6868   return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm);
6869 }
6870 
6871 __extension__ extern __inline int64_t
6872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6873 __arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
6874 {
6875   return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c);
6876 }
6877 
6878 __extension__ extern __inline uint64_t
6879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6880 __arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
6881 {
6882   return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c);
6883 }
6884 
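/* Whole-vector shift left with carry.  Each vshlcq wrapper below pairs two
   builtins for the same operation: one yields the shifted vector, which is
   returned, and the other yields the updated carry value, which is written
   back through the __b pointer.  */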
6885 __extension__ extern __inline int8x16_t
6886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6887 __arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm)
6888 {
6889   int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm);
6890   *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm);
6891   return __res;
6892 }
6893 
6894 __extension__ extern __inline uint8x16_t
6895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6896 __arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm)
6897 {
6898   uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm);
6899   *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm);
6900   return __res;
6901 }
6902 
6903 __extension__ extern __inline int16x8_t
6904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6905 __arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm)
6906 {
6907   int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm);
6908   *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm);
6909   return __res;
6910 }
6911 
6912 __extension__ extern __inline uint16x8_t
6913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6914 __arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm)
6915 {
6916   uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm);
6917   *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm);
6918   return __res;
6919 }
6920 
6921 __extension__ extern __inline int32x4_t
6922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6923 __arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm)
6924 {
6925   int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm);
6926   *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm);
6927   return __res;
6928 }
6929 
6930 __extension__ extern __inline uint32x4_t
6931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6932 __arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm)
6933 {
6934   uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm);
6935   *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm);
6936   return __res;
6937 }
6938 
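/* Predicated lane select: vpselq returns lanes of __a where the
   corresponding bit of the predicate __p is set and lanes of __b
   where it is clear.  */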
6939 __extension__ extern __inline uint8x16_t
6940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6941 __arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
6942 {
6943   return __builtin_mve_vpselq_uv16qi (__a, __b, __p);
6944 }
6945 
6946 __extension__ extern __inline int8x16_t
6947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6948 __arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
6949 {
6950   return __builtin_mve_vpselq_sv16qi (__a, __b, __p);
6951 }
6952 
6953 __extension__ extern __inline uint8x16_t
6954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6955 __arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
6956 {
6957   return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p);
6958 }
6959 
6960 __extension__ extern __inline uint8x16_t
6961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6962 __arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
6963 {
6964   return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p);
6965 }
6966 
6967 __extension__ extern __inline uint8x16_t
6968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6969 __arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
6970 {
6971   return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c);
6972 }
6973 
6974 __extension__ extern __inline uint8x16_t
6975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6976 __arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
6977 {
6978   return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c);
6979 }
6980 
6981 __extension__ extern __inline uint32_t
6982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6983 __arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
6984 {
6985   return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p);
6986 }
6987 
6988 __extension__ extern __inline uint32_t
6989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6990 __arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
6991 {
6992   return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c);
6993 }
6994 
6995 __extension__ extern __inline uint8_t
6996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6997 __arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
6998 {
6999   return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p);
7000 }
7001 
7002 __extension__ extern __inline uint8_t
7003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7004 __arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
7005 {
7006   return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p);
7007 }
7008 
7009 __extension__ extern __inline uint8x16_t
7010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7011 __arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
7012 {
7013   return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p);
7014 }
7015 
7016 __extension__ extern __inline mve_pred16_t
7017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7018 __arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
7019 {
7020   return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p);
7021 }
7022 
7023 __extension__ extern __inline mve_pred16_t
7024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7025 __arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
7026 {
7027   return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p);
7028 }
7029 
7030 __extension__ extern __inline mve_pred16_t
7031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7032 __arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
7033 {
7034   return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p);
7035 }
7036 
7037 __extension__ extern __inline mve_pred16_t
7038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7039 __arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
7040 {
7041   return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p);
7042 }
7043 
7044 __extension__ extern __inline mve_pred16_t
7045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7046 __arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
7047 {
7048   return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p);
7049 }
7050 
7051 __extension__ extern __inline mve_pred16_t
7052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7053 __arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
7054 {
7055   return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p);
7056 }
7057 
7058 __extension__ extern __inline mve_pred16_t
7059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7060 __arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
7061 {
7062   return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p);
7063 }
7064 
7065 __extension__ extern __inline mve_pred16_t
7066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7067 __arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
7068 {
7069   return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p);
7070 }
7071 
7072 __extension__ extern __inline uint8x16_t
7073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7074 __arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
7075 {
7076   return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p);
7077 }
7078 
7079 __extension__ extern __inline uint32_t
7080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7081 __arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
7082 {
7083   return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p);
7084 }
7085 
7086 __extension__ extern __inline uint8x16_t
7087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7088 __arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
7089 {
7090   return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm);
7091 }
7092 
7093 __extension__ extern __inline uint8x16_t
7094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7095 __arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
7096 {
7097   return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm);
7098 }
7099 
7100 __extension__ extern __inline uint8x16_t
7101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7102 __arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
7103 {
7104   return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
7105 }
7106 
7107 __extension__ extern __inline uint8x16_t
7108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7109 __arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
7110 {
7111   return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
7112 }
7113 
7114 __extension__ extern __inline uint8x16_t
7115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7116 __arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
7117 {
7118   return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
7119 }
7120 
7121 __extension__ extern __inline uint8x16_t
7122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7123 __arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
7124 {
7125   return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
7126 }
7127 
7128 __extension__ extern __inline uint8_t
7129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7130 __arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
7131 {
7132   return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p);
7133 }
7134 
7135 __extension__ extern __inline uint8x16_t
7136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7137 __arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7138 {
7139   return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p);
7140 }
7141 
7142 __extension__ extern __inline uint8_t
7143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7144 __arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
7145 {
7146   return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p);
7147 }
7148 
7149 __extension__ extern __inline uint8x16_t
7150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7151 __arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7152 {
7153   return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p);
7154 }
7155 
7156 __extension__ extern __inline mve_pred16_t
7157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7158 __arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7159 {
7160   return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p);
7161 }
7162 
7163 __extension__ extern __inline mve_pred16_t
7164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7165 __arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7166 {
7167   return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p);
7168 }
7169 
7170 __extension__ extern __inline mve_pred16_t
7171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7172 __arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7173 {
7174   return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p);
7175 }
7176 
7177 __extension__ extern __inline mve_pred16_t
7178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7179 __arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7180 {
7181   return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p);
7182 }
7183 
7184 __extension__ extern __inline mve_pred16_t
7185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7186 __arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7187 {
7188   return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p);
7189 }
7190 
7191 __extension__ extern __inline mve_pred16_t
7192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7193 __arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7194 {
7195   return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p);
7196 }
7197 
7198 __extension__ extern __inline mve_pred16_t
7199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7200 __arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7201 {
7202   return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p);
7203 }
7204 
7205 __extension__ extern __inline mve_pred16_t
7206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7207 __arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7208 {
7209   return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p);
7210 }
7211 
7212 __extension__ extern __inline mve_pred16_t
7213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7214 __arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7215 {
7216   return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p);
7217 }
7218 
7219 __extension__ extern __inline mve_pred16_t
7220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7221 __arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7222 {
7223   return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p);
7224 }
7225 
7226 __extension__ extern __inline mve_pred16_t
7227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7228 __arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7229 {
7230   return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p);
7231 }
7232 
7233 __extension__ extern __inline mve_pred16_t
7234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7235 __arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
7236 {
7237   return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p);
7238 }
7239 
7240 __extension__ extern __inline int8x16_t
7241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7242 __arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
7243 {
7244   return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
7245 }
7246 
7247 __extension__ extern __inline int8x16_t
7248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7249 __arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
7250 {
7251   return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
7252 }
7253 
7254 __extension__ extern __inline int8x16_t
7255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7256 __arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7257 {
7258   return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p);
7259 }
7260 
7261 __extension__ extern __inline int8x16_t
7262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7263 __arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
7264 {
7265   return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
7266 }
7267 
7268 __extension__ extern __inline int8x16_t
7269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7270 __arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
7271 {
7272   return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
7273 }
7274 
7275 __extension__ extern __inline int8x16_t
7276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7277 __arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7278 {
7279   return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p);
7280 }
7281 
7282 __extension__ extern __inline int8x16_t
7283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7284 __arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7285 {
7286   return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p);
7287 }
7288 
7289 __extension__ extern __inline int8x16_t
7290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7291 __arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7292 {
7293   return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p);
7294 }
7295 
7296 
7297 __extension__ extern __inline int8x16_t
7298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7299 __arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7300 {
7301   return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p);
7302 }
7303 
7304 __extension__ extern __inline int32_t
7305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7306 __arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7307 {
7308   return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p);
7309 }
7310 
7311 __extension__ extern __inline int32_t
7312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7313 __arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7314 {
7315   return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p);
7316 }
7317 
7318 __extension__ extern __inline int32_t
7319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7320 __arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7321 {
7322   return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p);
7323 }
7324 
7325 __extension__ extern __inline int32_t
7326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7327 __arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
7328 {
7329   return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p);
7330 }
7331 
7332 __extension__ extern __inline int8_t
7333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7334 __arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
7335 {
7336   return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p);
7337 }
7338 
7339 __extension__ extern __inline int8_t
7340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7341 __arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
7342 {
7343   return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p);
7344 }
7345 
7346 __extension__ extern __inline int8x16_t
7347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7348 __arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
7349 {
7350   return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p);
7351 }
7352 
7353 __extension__ extern __inline int8x16_t
7354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7355 __arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7356 {
7357   return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p);
7358 }
7359 
7360 __extension__ extern __inline int8x16_t
7361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7362 __arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7363 {
7364   return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p);
7365 }
7366 
7367 __extension__ extern __inline int32_t
7368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7369 __arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p)
7370 {
7371   return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p);
7372 }
7373 
7374 __extension__ extern __inline int8x16_t
7375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7376 __arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
7377 {
7378   return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p);
7379 }
7380 
7381 __extension__ extern __inline int8x16_t
7382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7383 __arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7384 {
7385   return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b);
7386 }
7387 
7388 __extension__ extern __inline int8x16_t
7389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7390 __arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7391 {
7392   return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b);
7393 }
7394 
7395 __extension__ extern __inline int8x16_t
7396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7397 __arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7398 {
7399   return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c);
7400 }
7401 
7402 __extension__ extern __inline int8x16_t
7403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7404 __arm_vqdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7405 {
7406   return __builtin_mve_vqdmlashq_n_sv16qi (__a, __b, __c);
7407 }
7408 
7409 __extension__ extern __inline int8x16_t
7410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7411 __arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7412 {
7413   return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c);
7414 }
7415 
7416 __extension__ extern __inline int8x16_t
7417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7418 __arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7419 {
7420   return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b);
7421 }
7422 
7423 __extension__ extern __inline int8x16_t
7424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7425 __arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7426 {
7427   return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b);
7428 }
7429 
7430 __extension__ extern __inline int8x16_t
7431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7432 __arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7433 {
7434   return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b);
7435 }
7436 
7437 __extension__ extern __inline int8x16_t
7438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7439 __arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7440 {
7441   return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b);
7442 }
7443 
7444 __extension__ extern __inline int8x16_t
7445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7446 __arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7447 {
7448   return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c);
7449 }
7450 
7451 __extension__ extern __inline int8x16_t
7452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7453 __arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7454 {
7455   return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b);
7456 }
7457 
7458 __extension__ extern __inline int8x16_t
7459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7460 __arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
7461 {
7462   return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b);
7463 }
7464 
7465 __extension__ extern __inline int32_t
7466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7467 __arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
7468 {
7469   return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c);
7470 }
7471 
7472 __extension__ extern __inline int32_t
7473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7474 __arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
7475 {
7476   return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c);
7477 }
7478 
7479 __extension__ extern __inline int8x16_t
7480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7481 __arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7482 {
7483   return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c);
7484 }
7485 
7486 __extension__ extern __inline int8x16_t
7487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7488 __arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
7489 {
7490   return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c);
7491 }
7492 
7493 __extension__ extern __inline int32_t
7494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7495 __arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
7496 {
7497   return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c);
7498 }
7499 
7500 __extension__ extern __inline int32_t
7501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7502 __arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
7503 {
7504   return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c);
7505 }
7506 
7507 __extension__ extern __inline int8x16_t
7508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7509 __arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
7510 {
7511   return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm);
7512 }
7513 
7514 __extension__ extern __inline int8x16_t
7515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7516 __arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
7517 {
7518   return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm);
7519 }
7520 
7521 __extension__ extern __inline uint16x8_t
7522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7523 __arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7524 {
7525   return __builtin_mve_vpselq_uv8hi (__a, __b, __p);
7526 }
7527 
7528 __extension__ extern __inline int16x8_t
7529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7530 __arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7531 {
7532   return __builtin_mve_vpselq_sv8hi (__a, __b, __p);
7533 }
7534 
7535 __extension__ extern __inline uint16x8_t
7536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7537 __arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
7538 {
7539   return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p);
7540 }
7541 
7542 __extension__ extern __inline uint16x8_t
7543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7544 __arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
7545 {
7546   return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p);
7547 }
7548 
7549 __extension__ extern __inline uint16x8_t
7550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7551 __arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
7552 {
7553   return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c);
7554 }
7555 
7556 __extension__ extern __inline uint16x8_t
7557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7558 __arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
7559 {
7560   return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c);
7561 }
7562 
7563 __extension__ extern __inline uint32_t
7564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7565 __arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7566 {
7567   return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p);
7568 }
7569 
7570 __extension__ extern __inline uint32_t
7571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7572 __arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
7573 {
7574   return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c);
7575 }
7576 
7577 __extension__ extern __inline uint16_t
7578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7579 __arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
7580 {
7581   return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p);
7582 }
7583 
7584 __extension__ extern __inline uint16_t
7585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7586 __arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
7587 {
7588   return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p);
7589 }
7590 
7591 __extension__ extern __inline uint16x8_t
7592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7593 __arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
7594 {
7595   return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p);
7596 }
7597 
7598 __extension__ extern __inline mve_pred16_t
7599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7600 __arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7601 {
7602   return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p);
7603 }
7604 
7605 __extension__ extern __inline mve_pred16_t
7606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7607 __arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
7608 {
7609   return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p);
7610 }
7611 
7612 __extension__ extern __inline mve_pred16_t
7613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7614 __arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7615 {
7616   return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p);
7617 }
7618 
7619 __extension__ extern __inline mve_pred16_t
7620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7621 __arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
7622 {
7623   return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p);
7624 }
7625 
7626 __extension__ extern __inline mve_pred16_t
7627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7628 __arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7629 {
7630   return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p);
7631 }
7632 
7633 __extension__ extern __inline mve_pred16_t
7634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7635 __arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
7636 {
7637   return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p);
7638 }
7639 
7640 __extension__ extern __inline mve_pred16_t
7641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7642 __arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
7643 {
7644   return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p);
7645 }
7646 
7647 __extension__ extern __inline mve_pred16_t
7648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7649 __arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
7650 {
7651   return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p);
7652 }
7653 
7654 __extension__ extern __inline uint16x8_t
7655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7656 __arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
7657 {
7658   return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p);
7659 }
7660 
7661 __extension__ extern __inline uint32_t
7662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7663 __arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
7664 {
7665   return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p);
7666 }
7667 
7668 __extension__ extern __inline uint16x8_t
7669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7670 __arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
7671 {
7672   return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm);
7673 }
7674 
7675 __extension__ extern __inline uint16x8_t
7676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7677 __arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
7678 {
7679   return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm);
7680 }
7681 
7682 __extension__ extern __inline uint16x8_t
7683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7684 __arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
7685 {
7686   return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
7687 }
7688 
7689 __extension__ extern __inline uint16x8_t
7690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7691 __arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
7692 {
7693   return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
7694 }
7695 
7696 __extension__ extern __inline uint16x8_t
7697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7698 __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
7699 {
7700   return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
7701 }
7702 
7703 __extension__ extern __inline uint16x8_t
7704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7705 __arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
7706 {
7707   return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
7708 }
7709 
7710 __extension__ extern __inline uint16_t
7711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7712 __arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
7713 {
7714   return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p);
7715 }
7716 
7717 __extension__ extern __inline uint16x8_t
7718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7719 __arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7720 {
7721   return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p);
7722 }
7723 
7724 __extension__ extern __inline uint16_t
7725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7726 __arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
7727 {
7728   return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p);
7729 }
7730 
7731 __extension__ extern __inline uint16x8_t
7732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7733 __arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7734 {
7735   return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p);
7736 }
7737 
7738 __extension__ extern __inline mve_pred16_t
7739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7740 __arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7741 {
7742   return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p);
7743 }
7744 
7745 __extension__ extern __inline mve_pred16_t
7746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7747 __arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7748 {
7749   return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p);
7750 }
7751 
7752 __extension__ extern __inline mve_pred16_t
7753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7754 __arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7755 {
7756   return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p);
7757 }
7758 
7759 __extension__ extern __inline mve_pred16_t
7760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7761 __arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7762 {
7763   return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p);
7764 }
7765 
7766 __extension__ extern __inline mve_pred16_t
7767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7768 __arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7769 {
7770   return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p);
7771 }
7772 
7773 __extension__ extern __inline mve_pred16_t
7774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7775 __arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7776 {
7777   return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p);
7778 }
7779 
7780 __extension__ extern __inline mve_pred16_t
7781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7782 __arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7783 {
7784   return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p);
7785 }
7786 
7787 __extension__ extern __inline mve_pred16_t
7788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7789 __arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7790 {
7791   return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p);
7792 }
7793 
7794 __extension__ extern __inline mve_pred16_t
7795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7796 __arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7797 {
7798   return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p);
7799 }
7800 
7801 __extension__ extern __inline mve_pred16_t
7802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7803 __arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7804 {
7805   return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p);
7806 }
7807 
7808 __extension__ extern __inline mve_pred16_t
7809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7810 __arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7811 {
7812   return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p);
7813 }
7814 
7815 __extension__ extern __inline mve_pred16_t
7816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7817 __arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
7818 {
7819   return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p);
7820 }
7821 
7822 __extension__ extern __inline int16x8_t
7823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7824 __arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
7825 {
7826   return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
7827 }
7828 
7829 __extension__ extern __inline int16x8_t
7830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7831 __arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
7832 {
7833   return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
7834 }
7835 
7836 __extension__ extern __inline int16x8_t
7837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7838 __arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7839 {
7840   return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p);
7841 }
7842 
7843 __extension__ extern __inline int16x8_t
7844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7845 __arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
7846 {
7847   return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
7848 }
7849 
7850 __extension__ extern __inline int16x8_t
7851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7852 __arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
7853 {
7854   return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
7855 }
7856 
7857 __extension__ extern __inline int16x8_t
7858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7859 __arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7860 {
7861   return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p);
7862 }
7863 
7864 __extension__ extern __inline int16x8_t
7865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7866 __arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7867 {
7868   return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p);
7869 }
7870 
7871 __extension__ extern __inline int16x8_t
7872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7873 __arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7874 {
7875   return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p);
7876 }
7877 
7878 __extension__ extern __inline int16x8_t
7879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7880 __arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7881 {
7882   return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p);
7883 }
7884 
7885 __extension__ extern __inline int32_t
7886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7887 __arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7888 {
7889   return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p);
7890 }
7891 
7892 __extension__ extern __inline int32_t
7893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7894 __arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7895 {
7896   return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p);
7897 }
7898 
7899 __extension__ extern __inline int32_t
7900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7901 __arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7902 {
7903   return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p);
7904 }
7905 
7906 __extension__ extern __inline int32_t
7907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7908 __arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
7909 {
7910   return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p);
7911 }
7912 
7913 __extension__ extern __inline int16_t
7914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7915 __arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
7916 {
7917   return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p);
7918 }
7919 
7920 __extension__ extern __inline int16_t
7921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7922 __arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
7923 {
7924   return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p);
7925 }
7926 
7927 __extension__ extern __inline int16x8_t
7928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7929 __arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
7930 {
7931   return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p);
7932 }
7933 
7934 __extension__ extern __inline int16x8_t
7935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7936 __arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7937 {
7938   return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p);
7939 }
7940 
7941 __extension__ extern __inline int16x8_t
7942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7943 __arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7944 {
7945   return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p);
7946 }
7947 
7948 __extension__ extern __inline int32_t
7949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7950 __arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p)
7951 {
7952   return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p);
7953 }
7954 
7955 __extension__ extern __inline int16x8_t
7956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7957 __arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
7958 {
7959   return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p);
7960 }
7961 
7962 __extension__ extern __inline int16x8_t
7963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7964 __arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
7965 {
7966   return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b);
7967 }
7968 
7969 __extension__ extern __inline int16x8_t
7970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7971 __arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
7972 {
7973   return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b);
7974 }
7975 
7976 __extension__ extern __inline int16x8_t
7977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7978 __arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
7979 {
7980   return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c);
7981 }
7982 
7983 __extension__ extern __inline int16x8_t
7984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7985 __arm_vqdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
7986 {
7987   return __builtin_mve_vqdmlashq_n_sv8hi (__a, __b, __c);
7988 }
7989 
7990 __extension__ extern __inline int16x8_t
7991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7992 __arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
7993 {
7994   return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c);
7995 }
7996 
7997 __extension__ extern __inline int16x8_t
7998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7999 __arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8000 {
8001   return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b);
8002 }
8003 
8004 __extension__ extern __inline int16x8_t
8005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8006 __arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8007 {
8008   return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b);
8009 }
8010 
8011 __extension__ extern __inline int16x8_t
8012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8013 __arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8014 {
8015   return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b);
8016 }
8017 
8018 __extension__ extern __inline int16x8_t
8019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8020 __arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8021 {
8022   return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b);
8023 }
8024 
8025 __extension__ extern __inline int16x8_t
8026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8027 __arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8028 {
8029   return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c);
8030 }
8031 
8032 __extension__ extern __inline int16x8_t
8033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8034 __arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8035 {
8036   return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b);
8037 }
8038 
8039 __extension__ extern __inline int16x8_t
8040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8041 __arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
8042 {
8043   return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b);
8044 }
8045 
8046 __extension__ extern __inline int32_t
8047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8048 __arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8049 {
8050   return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c);
8051 }
8052 
8053 __extension__ extern __inline int32_t
8054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8055 __arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8056 {
8057   return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c);
8058 }
8059 
8060 __extension__ extern __inline int16x8_t
8061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8062 __arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8063 {
8064   return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c);
8065 }
8066 
8067 __extension__ extern __inline int16x8_t
8068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8069 __arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8070 {
8071   return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c);
8072 }
8073 
8074 __extension__ extern __inline int32_t
8075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8076 __arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8077 {
8078   return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c);
8079 }
8080 
8081 __extension__ extern __inline int32_t
8082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8083 __arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8084 {
8085   return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c);
8086 }
8087 
8088 __extension__ extern __inline int16x8_t
8089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8090 __arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
8091 {
8092   return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm);
8093 }
8094 
8095 __extension__ extern __inline int16x8_t
8096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8097 __arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
8098 {
8099   return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm);
8100 }
8101 
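/* Editorial note (not part of the upstream header): vsriq_n/vsliq_n are
   shift-and-insert operations.  Each lane of __b is shifted by __imm and
   written into the matching lane of __a, preserving the bits of __a that
   the shifted value does not cover.  For example, merging a 4-bit field
   into the top of every 16-bit lane (low_bits and high_bits are
   placeholder names):

     int16x8_t packed = __arm_vsliq_n_s16 (low_bits, high_bits, 12);  */
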
8102 __extension__ extern __inline uint32x4_t
8103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8104 __arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8105 {
8106   return __builtin_mve_vpselq_uv4si (__a, __b, __p);
8107 }
8108 
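/* Usage sketch (editorial comment, not part of the upstream header):
   vpselq picks each lane of the result from __a where the predicate is
   true and from __b where it is false, e.g. a branch-free per-lane
   unsigned maximum (a and b are placeholder names):

     mve_pred16_t gt = __arm_vcmphiq_u32 (a, b);
     uint32x4_t  max = __arm_vpselq_u32 (a, b, gt);  */
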
8109 __extension__ extern __inline int32x4_t
8110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8111 __arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8112 {
8113   return __builtin_mve_vpselq_sv4si (__a, __b, __p);
8114 }
8115 
8116 __extension__ extern __inline uint32x4_t
8117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8118 __arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8119 {
8120   return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p);
8121 }
8122 
8123 __extension__ extern __inline uint32x4_t
8124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8125 __arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8126 {
8127   return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p);
8128 }
8129 
8130 __extension__ extern __inline uint32x4_t
8131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8132 __arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
8133 {
8134   return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c);
8135 }
8136 
8137 __extension__ extern __inline uint32x4_t
8138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8139 __arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
8140 {
8141   return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c);
8142 }
8143 
8144 __extension__ extern __inline uint32_t
8145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8146 __arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8147 {
8148   return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p);
8149 }
8150 
8151 __extension__ extern __inline uint32_t
8152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8153 __arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
8154 {
8155   return __builtin_mve_vmladavaq_uv4si (__a, __b, __c);
8156 }
8157 
8158 __extension__ extern __inline uint32_t
8159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8160 __arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8161 {
8162   return __builtin_mve_vminvq_p_uv4si (__a, __b, __p);
8163 }
8164 
8165 __extension__ extern __inline uint32_t
8166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8167 __arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8168 {
8169   return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p);
8170 }
8171 
8172 __extension__ extern __inline uint32x4_t
8173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8174 __arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
8175 {
8176   return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p);
8177 }
8178 
8179 __extension__ extern __inline mve_pred16_t
8180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8181 __arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8182 {
8183   return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p);
8184 }
8185 
8186 __extension__ extern __inline mve_pred16_t
8187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8188 __arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8189 {
8190   return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p);
8191 }
8192 
8193 __extension__ extern __inline mve_pred16_t
8194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8195 __arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8196 {
8197   return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p);
8198 }
8199 
8200 __extension__ extern __inline mve_pred16_t
8201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8202 __arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8203 {
8204   return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p);
8205 }
8206 
8207 __extension__ extern __inline mve_pred16_t
8208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8209 __arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8210 {
8211   return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p);
8212 }
8213 
8214 __extension__ extern __inline mve_pred16_t
8215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8216 __arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8217 {
8218   return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p);
8219 }
8220 
8221 __extension__ extern __inline mve_pred16_t
8222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8223 __arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8224 {
8225   return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p);
8226 }
8227 
8228 __extension__ extern __inline mve_pred16_t
8229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8230 __arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8231 {
8232   return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p);
8233 }
8234 
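/* Editorial note (not part of the upstream header): the _m ("merging")
   compare variants AND their result with the incoming predicate, so
   lanes that are inactive in __p always compare false.  This allows
   per-lane conditions to be chained, e.g. an unsigned range check
   lo <= x <= hi (x, lo and hi are placeholder names):

     mve_pred16_t in_range =
       __arm_vcmpcsq_m_u32 (x, lo, __arm_vcmpcsq_u32 (hi, x));  */
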
8235 __extension__ extern __inline uint32x4_t
8236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8237 __arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8238 {
8239   return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p);
8240 }
8241 
8242 __extension__ extern __inline uint32_t
8243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8244 __arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8245 {
8246   return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p);
8247 }
8248 
8249 __extension__ extern __inline uint32x4_t
8250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8251 __arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
8252 {
8253   return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm);
8254 }
8255 
8256 __extension__ extern __inline uint32x4_t
8257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8258 __arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
8259 {
8260   return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm);
8261 }
8262 
8263 __extension__ extern __inline uint32x4_t
8264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8265 __arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8266 {
8267   return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
8268 }
8269 
8270 __extension__ extern __inline uint32x4_t
8271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8272 __arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8273 {
8274   return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
8275 }
8276 
8277 __extension__ extern __inline uint32x4_t
8278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8279 __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8280 {
8281   return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
8282 }
8283 
8284 __extension__ extern __inline uint32x4_t
8285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8286 __arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8287 {
8288   return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
8289 }
8290 
8291 __extension__ extern __inline uint32_t
8292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8293 __arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
8294 {
8295   return __builtin_mve_vminavq_p_sv4si (__a, __b, __p);
8296 }
8297 
8298 __extension__ extern __inline uint32x4_t
8299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8300 __arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8301 {
8302   return __builtin_mve_vminaq_m_sv4si (__a, __b, __p);
8303 }
8304 
8305 __extension__ extern __inline uint32_t
8306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8307 __arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
8308 {
8309   return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p);
8310 }
8311 
8312 __extension__ extern __inline uint32x4_t
8313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8314 __arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8315 {
8316   return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p);
8317 }
8318 
8319 __extension__ extern __inline mve_pred16_t
8320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8321 __arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8322 {
8323   return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p);
8324 }
8325 
8326 __extension__ extern __inline mve_pred16_t
8327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8328 __arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8329 {
8330   return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p);
8331 }
8332 
8333 __extension__ extern __inline mve_pred16_t
8334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8335 __arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8336 {
8337   return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p);
8338 }
8339 
8340 __extension__ extern __inline mve_pred16_t
8341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8342 __arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8343 {
8344   return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p);
8345 }
8346 
8347 __extension__ extern __inline mve_pred16_t
8348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8349 __arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8350 {
8351   return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p);
8352 }
8353 
8354 __extension__ extern __inline mve_pred16_t
8355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8356 __arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8357 {
8358   return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p);
8359 }
8360 
8361 __extension__ extern __inline mve_pred16_t
8362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8363 __arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8364 {
8365   return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p);
8366 }
8367 
8368 __extension__ extern __inline mve_pred16_t
8369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8370 __arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8371 {
8372   return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p);
8373 }
8374 
8375 __extension__ extern __inline mve_pred16_t
8376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8377 __arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8378 {
8379   return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p);
8380 }
8381 
8382 __extension__ extern __inline mve_pred16_t
8383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8384 __arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8385 {
8386   return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p);
8387 }
8388 
8389 __extension__ extern __inline mve_pred16_t
8390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8391 __arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8392 {
8393   return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p);
8394 }
8395 
8396 __extension__ extern __inline mve_pred16_t
8397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8398 __arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8399 {
8400   return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p);
8401 }
8402 
8403 __extension__ extern __inline int32x4_t
8404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8405 __arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8406 {
8407   return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
8408 }
8409 
8410 __extension__ extern __inline int32x4_t
8411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8412 __arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8413 {
8414   return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
8415 }
8416 
8417 __extension__ extern __inline int32x4_t
8418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8419 __arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8420 {
8421   return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p);
8422 }
8423 
8424 __extension__ extern __inline int32x4_t
8425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8426 __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8427 {
8428   return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
8429 }
8430 
8431 __extension__ extern __inline int32x4_t
8432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8433 __arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8434 {
8435   return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
8436 }
8437 
8438 __extension__ extern __inline int32x4_t
8439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8440 __arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8441 {
8442   return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p);
8443 }
8444 
8445 __extension__ extern __inline int32x4_t
8446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8447 __arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8448 {
8449   return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p);
8450 }
8451 
8452 __extension__ extern __inline int32x4_t
8453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8454 __arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8455 {
8456   return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p);
8457 }
8458 
8459 __extension__ extern __inline int32x4_t
8460 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8461 __arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8462 {
8463   return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p);
8464 }
8465 
8466 __extension__ extern __inline int32_t
8467 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8468 __arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8469 {
8470   return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p);
8471 }
8472 
8473 __extension__ extern __inline int32_t
8474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8475 __arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8476 {
8477   return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p);
8478 }
8479 
8480 __extension__ extern __inline int32_t
8481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8482 __arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8483 {
8484   return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p);
8485 }
8486 
8487 __extension__ extern __inline int32_t
8488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8489 __arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8490 {
8491   return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p);
8492 }
8493 
8494 __extension__ extern __inline int32_t
8495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8496 __arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8497 {
8498   return __builtin_mve_vminvq_p_sv4si (__a, __b, __p);
8499 }
8500 
8501 __extension__ extern __inline int32_t
8502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8503 __arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8504 {
8505   return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p);
8506 }
8507 
8508 __extension__ extern __inline int32x4_t
8509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8510 __arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
8511 {
8512   return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p);
8513 }
8514 
8515 __extension__ extern __inline int32x4_t
8516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8517 __arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8518 {
8519   return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p);
8520 }
8521 
8522 __extension__ extern __inline int32x4_t
8523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8524 __arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8525 {
8526   return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p);
8527 }
8528 
8529 __extension__ extern __inline int32_t
8530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8531 __arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8532 {
8533   return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p);
8534 }
8535 
8536 __extension__ extern __inline int32x4_t
8537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8538 __arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8539 {
8540   return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p);
8541 }
8542 
8543 __extension__ extern __inline int32x4_t
8544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8545 __arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8546 {
8547   return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b);
8548 }
8549 
8550 __extension__ extern __inline int32x4_t
8551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8552 __arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8553 {
8554   return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b);
8555 }
8556 
8557 __extension__ extern __inline int32x4_t
8558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8559 __arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8560 {
8561   return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c);
8562 }
8563 
8564 __extension__ extern __inline int32x4_t
8565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8566 __arm_vqdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8567 {
8568   return __builtin_mve_vqdmlashq_n_sv4si (__a, __b, __c);
8569 }
8570 
8571 __extension__ extern __inline int32x4_t
8572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8573 __arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8574 {
8575   return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c);
8576 }
8577 
8578 __extension__ extern __inline int32x4_t
8579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8580 __arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8581 {
8582   return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b);
8583 }
8584 
8585 __extension__ extern __inline int32x4_t
8586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8587 __arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8588 {
8589   return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b);
8590 }
8591 
8592 __extension__ extern __inline int32x4_t
8593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8594 __arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8595 {
8596   return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b);
8597 }
8598 
8599 __extension__ extern __inline int32x4_t
8600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8601 __arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8602 {
8603   return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b);
8604 }
8605 
8606 __extension__ extern __inline int32x4_t
8607 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8608 __arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8609 {
8610   return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c);
8611 }
8612 
8613 __extension__ extern __inline int32x4_t
8614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8615 __arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8616 {
8617   return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b);
8618 }
8619 
8620 __extension__ extern __inline int32x4_t
8621 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8622 __arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8623 {
8624   return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b);
8625 }
8626 
8627 __extension__ extern __inline int32_t
8628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8629 __arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8630 {
8631   return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c);
8632 }
8633 
8634 __extension__ extern __inline int32_t
8635 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8636 __arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8637 {
8638   return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c);
8639 }
8640 
8641 __extension__ extern __inline int32x4_t
8642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8643 __arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8644 {
8645   return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c);
8646 }
8647 
8648 __extension__ extern __inline int32x4_t
8649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8650 __arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8651 {
8652   return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c);
8653 }
8654 
8655 __extension__ extern __inline int32_t
8656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8657 __arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8658 {
8659   return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c);
8660 }
8661 
8662 __extension__ extern __inline int32_t
8663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8664 __arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8665 {
8666   return __builtin_mve_vmladavaq_sv4si (__a, __b, __c);
8667 }
8668 
8669 __extension__ extern __inline int32x4_t
8670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8671 __arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
8672 {
8673   return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm);
8674 }
8675 
8676 __extension__ extern __inline int32x4_t
8677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8678 __arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
8679 {
8680   return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm);
8681 }
8682 
8683 __extension__ extern __inline uint64x2_t
8684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8685 __arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
8686 {
8687   return __builtin_mve_vpselq_uv2di (__a, __b, __p);
8688 }
8689 
8690 __extension__ extern __inline int64x2_t
8691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8692 __arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
8693 {
8694   return __builtin_mve_vpselq_sv2di (__a, __b, __p);
8695 }
8696 
8697 __extension__ extern __inline int64_t
8698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8699 __arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8700 {
8701   return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c);
8702 }
8703 
8704 __extension__ extern __inline int64_t
8705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8706 __arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8707 {
8708   return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c);
8709 }
8710 
8711 __extension__ extern __inline int64_t
8712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8713 __arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8714 {
8715   return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c);
8716 }
8717 
8718 __extension__ extern __inline int64_t
8719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8720 __arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p)
8721 {
8722   return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p);
8723 }
8724 
8725 __extension__ extern __inline int8x16_t
8726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8727 __arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
8728 {
8729   return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p);
8730 }
8731 
8732 __extension__ extern __inline int64_t
8733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8734 __arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8735 {
8736   return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p);
8737 }
8738 
8739 __extension__ extern __inline int64_t
8740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8741 __arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8742 {
8743   return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p);
8744 }
8745 
8746 __extension__ extern __inline int64_t
8747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8748 __arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8749 {
8750   return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p);
8751 }
8752 
8753 __extension__ extern __inline int64_t
8754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8755 __arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8756 {
8757   return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p);
8758 }
8759 
8760 __extension__ extern __inline uint64_t
8761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8762 __arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
8763 {
8764   return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p);
8765 }
8766 
8767 __extension__ extern __inline uint8x16_t
8768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8769 __arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
8770 {
8771   return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p);
8772 }
8773 
8774 __extension__ extern __inline uint64_t
8775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8776 __arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8777 {
8778   return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p);
8779 }
8780 
8781 __extension__ extern __inline int16x8_t
8782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8783 __arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
8784 {
8785   return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p);
8786 }
8787 
8788 __extension__ extern __inline int16x8_t
8789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8790 __arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
8791 {
8792   return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p);
8793 }
8794 
8795 __extension__ extern __inline int8x16_t
8796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8797 __arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8798 {
8799   return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm);
8800 }
8801 
8802 __extension__ extern __inline int8x16_t
8803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8804 __arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8805 {
8806   return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm);
8807 }
8808 
8809 __extension__ extern __inline int8x16_t
8810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8811 __arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8812 {
8813   return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm);
8814 }
8815 
8816 __extension__ extern __inline int8x16_t
8817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8818 __arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8819 {
8820   return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm);
8821 }
8822 
8823 __extension__ extern __inline int8x16_t
8824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8825 __arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8826 {
8827   return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm);
8828 }
8829 
8830 __extension__ extern __inline int8x16_t
8831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8832 __arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8833 {
8834   return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm);
8835 }
8836 
8837 __extension__ extern __inline int8x16_t
8838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8839 __arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8840 {
8841   return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm);
8842 }
8843 
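/* Usage sketch (editorial comment, not part of the upstream header):
   the "b"/"t" narrowing shifts write the even (bottom) and odd (top)
   byte lanes of the destination respectively, so a pair of them packs
   two int16x8_t vectors into one int8x16_t (lo and hi are placeholder
   names):

     int8x16_t r = __arm_vdupq_n_s8 (0);
     r = __arm_vshrnbq_n_s16 (r, lo, 4);
     r = __arm_vshrntq_n_s16 (r, hi, 4);  */
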
8844 __extension__ extern __inline int64_t
8845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8846 __arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8847 {
8848   return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c);
8849 }
8850 
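/* Usage sketch (editorial comment, not part of the upstream header):
   vmlaldavaq accumulates the lane-wise products of two vectors into a
   64-bit scalar, which makes it a natural dot-product building block
   (a and b are placeholder names):

     int64_t acc = 0;
     acc = __arm_vmlaldavaq_s16 (acc, a, b);  */
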
8851 __extension__ extern __inline int64_t
8852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8853 __arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8854 {
8855   return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c);
8856 }
8857 
8858 __extension__ extern __inline int64_t
8859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8860 __arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8861 {
8862   return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c);
8863 }
8864 
8865 __extension__ extern __inline int64_t
8866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8867 __arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8868 {
8869   return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c);
8870 }
8871 
8872 __extension__ extern __inline int64_t
8873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8874 __arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8875 {
8876   return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p);
8877 }
8878 
8879 __extension__ extern __inline int64_t
8880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8881 __arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8882 {
8883   return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p);
8884 }
8885 
8886 __extension__ extern __inline int64_t
8887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8888 __arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8889 {
8890   return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p);
8891 }
8892 
8893 __extension__ extern __inline int64_t
8894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8895 __arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8896 {
8897   return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p);
8898 }
8899 
8900 __extension__ extern __inline int16x8_t
8901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8902 __arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
8903 {
8904   return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p);
8905 }
8906 
8907 __extension__ extern __inline int16x8_t
8908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8909 __arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
8910 {
8911   return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p);
8912 }
8913 
8914 __extension__ extern __inline int8x16_t
8915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8916 __arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8917 {
8918   return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p);
8919 }
8920 
8921 __extension__ extern __inline int8x16_t
8922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8923 __arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8924 {
8925   return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p);
8926 }
8927 
8928 __extension__ extern __inline int8x16_t
8929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8930 __arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8931 {
8932   return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p);
8933 }
8934 
8935 __extension__ extern __inline int8x16_t
8936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8937 __arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8938 {
8939   return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p);
8940 }
8941 
8942 __extension__ extern __inline int8x16_t
8943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8944 __arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
8945 {
8946   return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p);
8947 }
8948 
8949 __extension__ extern __inline uint16x8_t
8950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8951 __arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
8952 {
8953   return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p);
8954 }
8955 
8956 __extension__ extern __inline uint16x8_t
8957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8958 __arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
8959 {
8960   return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p);
8961 }
8962 
8963 __extension__ extern __inline uint8x16_t
8964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8965 __arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
8966 {
8967   return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm);
8968 }
8969 
8970 __extension__ extern __inline uint8x16_t
8971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8972 __arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
8973 {
8974   return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm);
8975 }
8976 
8977 __extension__ extern __inline uint8x16_t
8978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8979 __arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
8980 {
8981   return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm);
8982 }
8983 
8984 __extension__ extern __inline uint8x16_t
8985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8986 __arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8987 {
8988   return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p);
8989 }
8990 
8991 __extension__ extern __inline uint8x16_t
8992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8993 __arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
8994 {
8995   return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p);
8996 }
8997 
8998 __extension__ extern __inline uint8x16_t
8999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9000 __arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9001 {
9002   return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm);
9003 }
9004 
9005 __extension__ extern __inline uint8x16_t
9006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9007 __arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9008 {
9009   return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm);
9010 }
9011 
9012 __extension__ extern __inline uint8x16_t
9013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9014 __arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9015 {
9016   return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm);
9017 }
9018 
9019 __extension__ extern __inline uint8x16_t
9020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9021 __arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9022 {
9023   return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm);
9024 }
9025 
9026 __extension__ extern __inline uint8x16_t
9027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9028 __arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9029 {
9030   return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm);
9031 }
9032 
9033 __extension__ extern __inline uint8x16_t
9034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9035 __arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9036 {
9037   return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm);
9038 }
9039 
9040 __extension__ extern __inline uint8x16_t
9041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9042 __arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
9043 {
9044   return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm);
9045 }
9046 
9047 __extension__ extern __inline uint64_t
9048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9049 __arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
9050 {
9051   return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c);
9052 }
9053 
9054 __extension__ extern __inline uint64_t
9055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9056 __arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9057 {
9058   return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p);
9059 }
9060 
9061 __extension__ extern __inline uint16x8_t
9062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9063 __arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
9064 {
9065   return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p);
9066 }
9067 
9068 __extension__ extern __inline uint16x8_t
9069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9070 __arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
9071 {
9072   return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p);
9073 }
9074 
9075 __extension__ extern __inline uint8x16_t
9076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9077 __arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
9078 {
9079   return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p);
9080 }
9081 
9082 __extension__ extern __inline uint8x16_t
9083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9084 __arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
9085 {
9086   return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p);
9087 }
9088 
9089 __extension__ extern __inline uint8x16_t
9090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9091 __arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
9092 {
9093   return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p);
9094 }
9095 
9096 __extension__ extern __inline uint8x16_t
9097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9098 __arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
9099 {
9100   return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p);
9101 }
9102 
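/* Editorial note (not part of the upstream header): the predicated
   narrowing moves above only update destination lanes whose predicate
   bits are set, so a partial (tail) vector can be narrowed without
   disturbing the remaining lanes.  Assuming the vctp16q intrinsic from
   elsewhere in this header (r, v and n are placeholder names):

     mve_pred16_t __p = __arm_vctp16q (n);
     r = __arm_vqmovnbq_m_u16 (r, v, __p);  */
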
9103 __extension__ extern __inline uint8x16_t
9104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9105 __arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
9106 {
9107   return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p);
9108 }
9109 
9110 __extension__ extern __inline int32x4_t
9111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9112 __arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
9113 {
9114   return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p);
9115 }
9116 
9117 __extension__ extern __inline int32x4_t
9118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9119 __arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
9120 {
9121   return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p);
9122 }
9123 
9124 __extension__ extern __inline int16x8_t
9125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9126 __arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9127 {
9128   return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm);
9129 }
9130 
9131 __extension__ extern __inline int16x8_t
9132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9133 __arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9134 {
9135   return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm);
9136 }
9137 
9138 __extension__ extern __inline int16x8_t
9139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9140 __arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9141 {
9142   return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm);
9143 }
9144 
9145 __extension__ extern __inline int16x8_t
9146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9147 __arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9148 {
9149   return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm);
9150 }
9151 
9152 __extension__ extern __inline int16x8_t
9153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9154 __arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9155 {
9156   return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm);
9157 }
9158 
9159 __extension__ extern __inline int16x8_t
9160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9161 __arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9162 {
9163   return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm);
9164 }
9165 
9166 __extension__ extern __inline int16x8_t
9167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9168 __arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
9169 {
9170   return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm);
9171 }
9172 
9173 __extension__ extern __inline int64_t
9174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9175 __arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
9176 {
9177   return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c);
9178 }
9179 
9180 __extension__ extern __inline int64_t
9181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9182 __arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
9183 {
9184   return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c);
9185 }
9186 
9187 __extension__ extern __inline int64_t
9188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9189 __arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
9190 {
9191   return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c);
9192 }
9193 
9194 __extension__ extern __inline int64_t
9195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9196 __arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
9197 {
9198   return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c);
9199 }
9200 
9201 __extension__ extern __inline int64_t
9202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9203 __arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9204 {
9205   return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p);
9206 }
9207 
9208 __extension__ extern __inline int64_t
9209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9210 __arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9211 {
9212   return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p);
9213 }
9214 
9215 __extension__ extern __inline int64_t
9216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9217 __arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9218 {
9219   return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p);
9220 }
9221 
9222 __extension__ extern __inline int64_t
9223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9224 __arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9225 {
9226   return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p);
9227 }
9228 
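/* Note (added comment): the vml{a,s}ldav[a][x]q[_p]_s32 wrappers above are
   dual multiply-accumulate reductions that return a 64-bit scalar.  In the
   naming convention, "a" adds the reduction to the 64-bit accumulator __a,
   "x" exchanges adjacent element pairs of one operand before multiplying,
   vmlsldav subtracts alternate products instead of adding them all, and
   "_p" restricts the reduction to the lanes enabled by the predicate __p.  */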
9229 __extension__ extern __inline int32x4_t
9230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9231 __arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
9232 {
9233   return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p);
9234 }
9235 
9236 __extension__ extern __inline int32x4_t
9237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9238 __arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
9239 {
9240   return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p);
9241 }
9242 
9243 __extension__ extern __inline int16x8_t
9244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9245 __arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9246 {
9247   return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p);
9248 }
9249 
9250 __extension__ extern __inline int16x8_t
9251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9252 __arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9253 {
9254   return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p);
9255 }
9256 
9257 __extension__ extern __inline int16x8_t
9258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9259 __arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9260 {
9261   return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p);
9262 }
9263 
9264 __extension__ extern __inline int16x8_t
9265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9266 __arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9267 {
9268   return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p);
9269 }
9270 
9271 __extension__ extern __inline int16x8_t
9272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9273 __arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
9274 {
9275   return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p);
9276 }
9277 
9278 __extension__ extern __inline uint32x4_t
9279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9280 __arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
9281 {
9282   return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p);
9283 }
9284 
9285 __extension__ extern __inline uint32x4_t
9286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9287 __arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
9288 {
9289   return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p);
9290 }
9291 
9292 __extension__ extern __inline uint16x8_t
9293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9294 __arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
9295 {
9296   return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm);
9297 }
9298 
9299 __extension__ extern __inline uint16x8_t
9300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9301 __arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
9302 {
9303   return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm);
9304 }
9305 
9306 __extension__ extern __inline uint16x8_t
9307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9308 __arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
9309 {
9310   return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm);
9311 }
9312 
9313 __extension__ extern __inline uint16x8_t
9314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9315 __arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9316 {
9317   return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p);
9318 }
9319 
9320 __extension__ extern __inline uint16x8_t
9321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9322 __arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
9323 {
9324   return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p);
9325 }
9326 
9327 __extension__ extern __inline uint16x8_t
9328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9329 __arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9330 {
9331   return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm);
9332 }
9333 
9334 __extension__ extern __inline uint16x8_t
9335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9336 __arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9337 {
9338   return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm);
9339 }
9340 
9341 __extension__ extern __inline uint16x8_t
9342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9343 __arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9344 {
9345   return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm);
9346 }
9347 
9348 __extension__ extern __inline uint16x8_t
9349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9350 __arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9351 {
9352   return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm);
9353 }
9354 
9355 __extension__ extern __inline uint16x8_t
9356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9357 __arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9358 {
9359   return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm);
9360 }
9361 
9362 __extension__ extern __inline uint16x8_t
9363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9364 __arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9365 {
9366   return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm);
9367 }
9368 
9369 __extension__ extern __inline uint16x8_t
9370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9371 __arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
9372 {
9373   return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm);
9374 }
9375 
9376 __extension__ extern __inline uint64_t
9377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9378 __arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
9379 {
9380   return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c);
9381 }
9382 
9383 __extension__ extern __inline uint64_t
9384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9385 __arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9386 {
9387   return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p);
9388 }
9389 
9390 __extension__ extern __inline uint32x4_t
9391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9392 __arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
9393 {
9394   return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p);
9395 }
9396 
9397 __extension__ extern __inline uint32x4_t
9398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9399 __arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
9400 {
9401   return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p);
9402 }
9403 
9404 __extension__ extern __inline uint16x8_t
9405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9406 __arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
9407 {
9408   return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p);
9409 }
9410 
9411 __extension__ extern __inline uint16x8_t
9412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9413 __arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
9414 {
9415   return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p);
9416 }
9417 
9418 __extension__ extern __inline uint16x8_t
9419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9420 __arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
9421 {
9422   return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p);
9423 }
9424 
9425 __extension__ extern __inline uint16x8_t
9426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9427 __arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
9428 {
9429   return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p);
9430 }
9431 
9432 __extension__ extern __inline uint16x8_t
9433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9434 __arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
9435 {
9436   return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p);
9437 }
9438 
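/* Note (added comment): in the wrappers that follow, the "_m" (merging)
   suffix means the operation is performed only in the lanes enabled by the
   predicate __p; disabled lanes of the result are copied from __inactive
   (or, for two-operand forms such as vsriq_m_n, from the first vector
   argument).  The "_p" suffix on reductions such as vabavq_p means only the
   enabled lanes contribute to the scalar result.  */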
9439 __extension__ extern __inline int8x16_t
9440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9441 __arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
9442 {
9443   return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
9444 }
9445 
9446 __extension__ extern __inline int8x16_t
9447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9448 __arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9449 {
9450   return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
9451 }
9452 
9453 __extension__ extern __inline uint8x16_t
9454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9455 __arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
9456 {
9457   return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p);
9458 }
9459 
9460 __extension__ extern __inline uint32_t
9461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9462 __arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
9463 {
9464   return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p);
9465 }
9466 
9467 __extension__ extern __inline uint8x16_t
9468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9469 __arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
9470 {
9471   return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p);
9472 }
9473 
9474 __extension__ extern __inline uint8x16_t
9475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9476 __arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9477 {
9478   return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
9479 }
9480 
9481 __extension__ extern __inline uint8x16_t
9482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9483 __arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9484 {
9485   return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
9486 }
9487 
9488 __extension__ extern __inline uint32_t
9489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9490 __arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
9491 {
9492   return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p);
9493 }
9494 
9495 __extension__ extern __inline int8x16_t
9496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9497 __arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9498 {
9499   return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p);
9500 }
9501 
9502 __extension__ extern __inline int16x8_t
9503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9504 __arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
9505 {
9506   return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
9507 }
9508 
9509 __extension__ extern __inline int16x8_t
9510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9511 __arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9512 {
9513   return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
9514 }
9515 
9516 __extension__ extern __inline uint16x8_t
9517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9518 __arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
9519 {
9520   return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p);
9521 }
9522 
9523 __extension__ extern __inline uint32_t
9524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9525 __arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
9526 {
9527   return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p);
9528 }
9529 
9530 __extension__ extern __inline uint16x8_t
9531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9532 __arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
9533 {
9534   return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p);
9535 }
9536 
9537 __extension__ extern __inline uint16x8_t
9538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9539 __arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9540 {
9541   return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
9542 }
9543 
9544 __extension__ extern __inline uint16x8_t
9545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9546 __arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9547 {
9548   return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
9549 }
9550 
9551 __extension__ extern __inline uint32_t
9552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9553 __arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
9554 {
9555   return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p);
9556 }
9557 
9558 __extension__ extern __inline int16x8_t
9559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9560 __arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9561 {
9562   return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p);
9563 }
9564 
9565 __extension__ extern __inline int32x4_t
9566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9567 __arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
9568 {
9569   return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p);
9570 }
9571 
9572 __extension__ extern __inline int32x4_t
9573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9574 __arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9575 {
9576   return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p);
9577 }
9578 
9579 __extension__ extern __inline uint32x4_t
9580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9581 __arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
9582 {
9583   return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p);
9584 }
9585 
9586 __extension__ extern __inline uint32_t
9587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9588 __arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
9589 {
9590   return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p);
9591 }
9592 
9593 __extension__ extern __inline uint32x4_t
9594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9595 __arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
9596 {
9597   return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p);
9598 }
9599 
9600 __extension__ extern __inline uint32x4_t
9601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9602 __arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9603 {
9604   return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p);
9605 }
9606 
9607 __extension__ extern __inline uint32x4_t
9608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9609 __arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9610 {
9611   return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p);
9612 }
9613 
9614 __extension__ extern __inline uint32_t
9615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9616 __arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
9617 {
9618   return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p);
9619 }
9620 
9621 __extension__ extern __inline int32x4_t
9622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9623 __arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9624 {
9625   return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p);
9626 }
9627 
9628 __extension__ extern __inline int8x16_t
9629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9630 __arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9631 {
9632   return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p);
9633 }
9634 
9635 __extension__ extern __inline int32x4_t
9636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9637 __arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9638 {
9639   return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p);
9640 }
9641 
9642 __extension__ extern __inline int16x8_t
9643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9644 __arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9645 {
9646   return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p);
9647 }
9648 
9649 __extension__ extern __inline uint8x16_t
9650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9651 __arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9652 {
9653   return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p);
9654 }
9655 
9656 __extension__ extern __inline uint32x4_t
9657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9658 __arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9659 {
9660   return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p);
9661 }
9662 
9663 __extension__ extern __inline uint16x8_t
9664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9665 __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9666 {
9667   return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
9668 }
9669 
9670 __extension__ extern __inline int8x16_t
9671 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9672 __arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
9673 {
9674   return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p);
9675 }
9676 
9677 __extension__ extern __inline int32x4_t
9678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9679 __arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
9680 {
9681   return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p);
9682 }
9683 
9684 __extension__ extern __inline int16x8_t
9685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9686 __arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
9687 {
9688   return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p);
9689 }
9690 
9691 __extension__ extern __inline uint8x16_t
9692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9693 __arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
9694 {
9695   return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p);
9696 }
9697 
9698 __extension__ extern __inline uint32x4_t
9699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9700 __arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
9701 {
9702   return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p);
9703 }
9704 
9705 __extension__ extern __inline uint16x8_t
9706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9707 __arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
9708 {
9709   return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p);
9710 }
9711 
9712 __extension__ extern __inline int8x16_t
9713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9714 __arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9715 {
9716   return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p);
9717 }
9718 
9719 __extension__ extern __inline int32x4_t
9720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9721 __arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9722 {
9723   return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p);
9724 }
9725 
9726 __extension__ extern __inline int16x8_t
9727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9728 __arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9729 {
9730   return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p);
9731 }
9732 
9733 __extension__ extern __inline uint8x16_t
9734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9735 __arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9736 {
9737   return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p);
9738 }
9739 
9740 __extension__ extern __inline uint32x4_t
9741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9742 __arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9743 {
9744   return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p);
9745 }
9746 
9747 __extension__ extern __inline uint16x8_t
9748 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9749 __arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9750 {
9751   return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p);
9752 }
9753 
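/* Illustrative sketch (added comment, not part of the original header):
   a merging-predicated add over the first `n' lanes, assuming the
   tail-predication intrinsic __arm_vctp32q declared elsewhere in this file:

     mve_pred16_t __p = __arm_vctp32q (n);
     int32x4_t __r = __arm_vaddq_m_s32 (__inactive, __a, __b, __p);

   Lanes of __r beyond the first `n' are taken unchanged from __inactive.  */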
9754 __extension__ extern __inline int8x16_t
9755 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9756 __arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9757 {
9758   return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p);
9759 }
9760 
9761 __extension__ extern __inline int32x4_t
9762 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9763 __arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9764 {
9765   return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p);
9766 }
9767 
9768 __extension__ extern __inline int16x8_t
9769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9770 __arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9771 {
9772   return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p);
9773 }
9774 
9775 __extension__ extern __inline uint8x16_t
9776 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9777 __arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9778 {
9779   return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p);
9780 }
9781 
9782 __extension__ extern __inline uint32x4_t
9783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9784 __arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9785 {
9786   return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p);
9787 }
9788 
9789 __extension__ extern __inline uint16x8_t
9790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9791 __arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9792 {
9793   return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p);
9794 }
9795 
9796 __extension__ extern __inline int8x16_t
9797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9798 __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9799 {
9800   return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p);
9801 }
9802 
9803 __extension__ extern __inline int32x4_t
9804 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9805 __arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9806 {
9807   return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p);
9808 }
9809 
9810 __extension__ extern __inline int16x8_t
9811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9812 __arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9813 {
9814   return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p);
9815 }
9816 
9817 __extension__ extern __inline uint8x16_t
9818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9819 __arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9820 {
9821   return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p);
9822 }
9823 
9824 __extension__ extern __inline uint32x4_t
9825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9826 __arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9827 {
9828   return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p);
9829 }
9830 
9831 __extension__ extern __inline uint16x8_t
9832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9833 __arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9834 {
9835   return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p);
9836 }
9837 
9838 __extension__ extern __inline int8x16_t
9839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9840 __arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
9841 {
9842   return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p);
9843 }
9844 
9845 __extension__ extern __inline int32x4_t
9846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9847 __arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
9848 {
9849   return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p);
9850 }
9851 
9852 __extension__ extern __inline int16x8_t
9853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9854 __arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
9855 {
9856   return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p);
9857 }
9858 
9859 __extension__ extern __inline uint8x16_t
9860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9861 __arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
9862 {
9863   return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p);
9864 }
9865 
9866 __extension__ extern __inline uint32x4_t
9867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9868 __arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
9869 {
9870   return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p);
9871 }
9872 
9873 __extension__ extern __inline uint16x8_t
9874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9875 __arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
9876 {
9877   return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p);
9878 }
9879 
9880 __extension__ extern __inline int8x16_t
9881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9882 __arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9883 {
9884   return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
9885 }
9886 
9887 __extension__ extern __inline int32x4_t
9888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9889 __arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9890 {
9891   return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
9892 }
9893 
9894 __extension__ extern __inline int16x8_t
9895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9896 __arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9897 {
9898   return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
9899 }
9900 
9901 __extension__ extern __inline uint8x16_t
9902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9903 __arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9904 {
9905   return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p);
9906 }
9907 
9908 __extension__ extern __inline uint32x4_t
9909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9910 __arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9911 {
9912   return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p);
9913 }
9914 
9915 __extension__ extern __inline uint16x8_t
9916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9917 __arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9918 {
9919   return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p);
9920 }
9921 
9922 __extension__ extern __inline int8x16_t
9923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9924 __arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9925 {
9926   return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
9927 }
9928 
9929 __extension__ extern __inline int32x4_t
9930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9931 __arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9932 {
9933   return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
9934 }
9935 
9936 __extension__ extern __inline int16x8_t
9937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9938 __arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9939 {
9940   return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
9941 }
9942 
9943 __extension__ extern __inline uint8x16_t
9944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9945 __arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9946 {
9947   return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p);
9948 }
9949 
9950 __extension__ extern __inline uint32x4_t
9951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9952 __arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9953 {
9954   return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p);
9955 }
9956 
9957 __extension__ extern __inline uint16x8_t
9958 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9959 __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
9960 {
9961   return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p);
9962 }
9963 
9964 __extension__ extern __inline int8x16_t
9965 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9966 __arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
9967 {
9968   return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p);
9969 }
9970 
9971 __extension__ extern __inline int32x4_t
9972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9973 __arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
9974 {
9975   return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p);
9976 }
9977 
9978 __extension__ extern __inline int16x8_t
9979 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9980 __arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
9981 {
9982   return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p);
9983 }
9984 
9985 __extension__ extern __inline uint8x16_t
9986 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9987 __arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
9988 {
9989   return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p);
9990 }
9991 
9992 __extension__ extern __inline uint32x4_t
9993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9994 __arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
9995 {
9996   return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p);
9997 }
9998 
9999 __extension__ extern __inline uint16x8_t
10000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10001 __arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10002 {
10003   return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p);
10004 }
10005 
10006 __extension__ extern __inline int8x16_t
10007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10008 __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
10009 {
10010   return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p);
10011 }
10012 
10013 __extension__ extern __inline int32x4_t
10014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10015 __arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
10016 {
10017   return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p);
10018 }
10019 
10020 __extension__ extern __inline int16x8_t
10021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10022 __arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
10023 {
10024   return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p);
10025 }
10026 
10027 __extension__ extern __inline uint8x16_t
10028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10029 __arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
10030 {
10031   return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p);
10032 }
10033 
10034 __extension__ extern __inline uint32x4_t
10035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10036 __arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
10037 {
10038   return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p);
10039 }
10040 
10041 __extension__ extern __inline uint16x8_t
10042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10043 __arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
10044 {
10045   return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p);
10046 }
10047 
10048 __extension__ extern __inline int8x16_t
10049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10050 __arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10051 {
10052   return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p);
10053 }
10054 
10055 __extension__ extern __inline int32x4_t
10056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10057 __arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10058 {
10059   return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p);
10060 }
10061 
10062 __extension__ extern __inline int16x8_t
10063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10064 __arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10065 {
10066   return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p);
10067 }
10068 
10069 __extension__ extern __inline uint8x16_t
10070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10071 __arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10072 {
10073   return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p);
10074 }
10075 
10076 __extension__ extern __inline uint32x4_t
10077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10078 __arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10079 {
10080   return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p);
10081 }
10082 
10083 __extension__ extern __inline uint16x8_t
10084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10085 __arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10086 {
10087   return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p);
10088 }
10089 
10090 __extension__ extern __inline int8x16_t
10091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10092 __arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10093 {
10094   return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
10095 }
10096 
10097 __extension__ extern __inline int32x4_t
10098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10099 __arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10100 {
10101   return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
10102 }
10103 
10104 __extension__ extern __inline int16x8_t
10105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10106 __arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10107 {
10108   return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
10109 }
10110 
10111 __extension__ extern __inline int8x16_t
10112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10113 __arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10114 {
10115   return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
10116 }
10117 
10118 __extension__ extern __inline int32x4_t
10119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10120 __arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10121 {
10122   return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
10123 }
10124 
10125 __extension__ extern __inline int16x8_t
10126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10127 __arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10128 {
10129   return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
10130 }
10131 
10132 __extension__ extern __inline int8x16_t
10133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10134 __arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
10135 {
10136   return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p);
10137 }
10138 
10139 __extension__ extern __inline int32x4_t
10140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10141 __arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
10142 {
10143   return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p);
10144 }
10145 
10146 __extension__ extern __inline int16x8_t
10147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10148 __arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
10149 {
10150   return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p);
10151 }
10152 
10153 __extension__ extern __inline uint8x16_t
10154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10155 __arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
10156 {
10157   return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p);
10158 }
10159 
10160 __extension__ extern __inline uint32x4_t
10161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10162 __arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
10163 {
10164   return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p);
10165 }
10166 
10167 __extension__ extern __inline uint16x8_t
10168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10169 __arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
10170 {
10171   return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p);
10172 }
10173 
10174 __extension__ extern __inline int8x16_t
10175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10176 __arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10177 {
10178   return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p);
10179 }
10180 
10181 __extension__ extern __inline int32x4_t
10182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10183 __arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10184 {
10185   return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p);
10186 }
10187 
10188 __extension__ extern __inline int16x8_t
10189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10190 __arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10191 {
10192   return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p);
10193 }
10194 
10195 __extension__ extern __inline uint8x16_t
10196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10197 __arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10198 {
10199   return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p);
10200 }
10201 
10202 __extension__ extern __inline uint32x4_t
10203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10204 __arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10205 {
10206   return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p);
10207 }
10208 
10209 __extension__ extern __inline uint16x8_t
10210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10211 __arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10212 {
10213   return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p);
10214 }
10215 
10216 __extension__ extern __inline int8x16_t
10217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10218 __arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10219 {
10220   return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p);
10221 }
10222 
10223 __extension__ extern __inline int32x4_t
10224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10225 __arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10226 {
10227   return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p);
10228 }
10229 
10230 __extension__ extern __inline int16x8_t
10231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10232 __arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10233 {
10234   return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p);
10235 }
10236 
10237 __extension__ extern __inline uint8x16_t
10238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10239 __arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10240 {
10241   return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p);
10242 }
10243 
10244 __extension__ extern __inline uint32x4_t
10245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10246 __arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10247 {
10248   return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p);
10249 }
10250 
10251 __extension__ extern __inline uint16x8_t
10252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10253 __arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10254 {
10255   return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p);
10256 }
10257 
10258 __extension__ extern __inline int8x16_t
10259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10260 __arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10261 {
10262   return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p);
10263 }
10264 
10265 __extension__ extern __inline int32x4_t
10266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10267 __arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10268 {
10269   return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p);
10270 }
10271 
10272 __extension__ extern __inline int16x8_t
10273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10274 __arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10275 {
10276   return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p);
10277 }
10278 
10279 __extension__ extern __inline uint8x16_t
10280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10281 __arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10282 {
10283   return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p);
10284 }
10285 
10286 __extension__ extern __inline uint32x4_t
10287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10288 __arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10289 {
10290   return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p);
10291 }
10292 
10293 __extension__ extern __inline uint16x8_t
10294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10295 __arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10296 {
10297   return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p);
10298 }
10299 
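/* Illustrative sketch (not part of the header): composing the predicated
   max/min intrinsics above gives a per-lane clamp in which disabled
   lanes are left untouched:

     int32x4_t raised  = __arm_vmaxq_m_s32 (v, v, lo, p);           // enabled lanes >= lo
     int32x4_t clamped = __arm_vminq_m_s32 (raised, raised, hi, p);  // enabled lanes <= hi
*/
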
10300 __extension__ extern __inline int32_t
10301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10302 __arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
10303 {
10304   return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p);
10305 }
10306 
10307 __extension__ extern __inline int32_t
10308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10309 __arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
10310 {
10311   return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p);
10312 }
10313 
10314 __extension__ extern __inline int32_t
10315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10316 __arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
10317 {
10318   return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p);
10319 }
10320 
10321 __extension__ extern __inline uint32_t
10322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10323 __arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
10324 {
10325   return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p);
10326 }
10327 
10328 __extension__ extern __inline uint32_t
10329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10330 __arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
10331 {
10332   return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p);
10333 }
10334 
10335 __extension__ extern __inline uint32_t
10336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10337 __arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
10338 {
10339   return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p);
10340 }
10341 
10342 __extension__ extern __inline int32_t
10343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10344 __arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
10345 {
10346   return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p);
10347 }
10348 
10349 __extension__ extern __inline int32_t
10350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10351 __arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
10352 {
10353   return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p);
10354 }
10355 
10356 __extension__ extern __inline int32_t
10357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10358 __arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
10359 {
10360   return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p);
10361 }
10362 
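/* Illustrative sketch (not part of the header): the _p forms of the
   multiply-accumulate-across intrinsics above only add products from
   predicate-enabled lanes, which handles the tail of a dot product.
   Assuming vctp16q and vldrhq_z_s16 as defined elsewhere in this
   header:

     int32_t acc = 0;
     for (int i = 0; i < n; i += 8)
       {
         mve_pred16_t p = __arm_vctp16q (n - i);   // all lanes, except on the tail
         acc = __arm_vmladavaq_p_s16 (acc, __arm_vldrhq_z_s16 (&a[i], p),
                                      __arm_vldrhq_z_s16 (&b[i], p), p);
       }
*/
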
10363 __extension__ extern __inline int8x16_t
10364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10365 __arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
10366 {
10367   return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p);
10368 }
10369 
10370 __extension__ extern __inline int32x4_t
10371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10372 __arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
10373 {
10374   return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p);
10375 }
10376 
10377 __extension__ extern __inline int16x8_t
10378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10379 __arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
10380 {
10381   return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p);
10382 }
10383 
10384 __extension__ extern __inline uint8x16_t
10385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10386 __arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
10387 {
10388   return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p);
10389 }
10390 
10391 __extension__ extern __inline uint32x4_t
10392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10393 __arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
10394 {
10395   return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p);
10396 }
10397 
10398 __extension__ extern __inline uint16x8_t
10399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10400 __arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
10401 {
10402   return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p);
10403 }
10404 
10405 __extension__ extern __inline int8x16_t
10406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10407 __arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
10408 {
10409   return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p);
10410 }
10411 
10412 __extension__ extern __inline int32x4_t
10413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10414 __arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
10415 {
10416   return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p);
10417 }
10418 
10419 __extension__ extern __inline int16x8_t
10420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10421 __arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
10422 {
10423   return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p);
10424 }
10425 
10426 __extension__ extern __inline uint8x16_t
10427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10428 __arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
10429 {
10430   return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p);
10431 }
10432 
10433 __extension__ extern __inline uint32x4_t
10434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10435 __arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
10436 {
10437   return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p);
10438 }
10439 
10440 __extension__ extern __inline uint16x8_t
10441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10442 __arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
10443 {
10444   return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p);
10445 }
10446 
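/* Illustrative sketch (not part of the header): the two families above
   differ in which operand the scalar plays.  In the enabled lanes
   vmlaq_m_n computes  __a + __b * __c  (scalar multiplier, vector
   accumulator __a), while vmlasq_m_n computes  __a * __b + __c  (scalar
   addend).  Disabled lanes keep the value already in __a.

     acc = __arm_vmlaq_m_n_s16 (acc, x, coeff, p);   // acc += x * coeff in enabled lanes
*/
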
10447 __extension__ extern __inline int32_t
10448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10449 __arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
10450 {
10451   return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p);
10452 }
10453 
10454 __extension__ extern __inline int32_t
10455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10456 __arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
10457 {
10458   return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p);
10459 }
10460 
10461 __extension__ extern __inline int32_t
10462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10463 __arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
10464 {
10465   return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p);
10466 }
10467 
10468 __extension__ extern __inline int32_t
10469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10470 __arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
10471 {
10472   return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p);
10473 }
10474 
10475 __extension__ extern __inline int32_t
10476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10477 __arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
10478 {
10479   return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p);
10480 }
10481 
10482 __extension__ extern __inline int32_t
10483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10484 __arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
10485 {
10486   return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p);
10487 }
10488 
10489 __extension__ extern __inline int8x16_t
10490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10491 __arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10492 {
10493   return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p);
10494 }
10495 
10496 __extension__ extern __inline int32x4_t
10497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10498 __arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10499 {
10500   return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p);
10501 }
10502 
10503 __extension__ extern __inline int16x8_t
10504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10505 __arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10506 {
10507   return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p);
10508 }
10509 
10510 __extension__ extern __inline uint8x16_t
10511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10512 __arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10513 {
10514   return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p);
10515 }
10516 
10517 __extension__ extern __inline uint32x4_t
10518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10519 __arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10520 {
10521   return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p);
10522 }
10523 
10524 __extension__ extern __inline uint16x8_t
10525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10526 __arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10527 {
10528   return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p);
10529 }
10530 
10531 __extension__ extern __inline int16x8_t
10532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10533 __arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10534 {
10535   return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p);
10536 }
10537 
10538 __extension__ extern __inline int64x2_t
10539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10540 __arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10541 {
10542   return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p);
10543 }
10544 
10545 __extension__ extern __inline int32x4_t
10546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10547 __arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10548 {
10549   return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p);
10550 }
10551 
10552 __extension__ extern __inline uint16x8_t
10553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10554 __arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10555 {
10556   return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p);
10557 }
10558 
10559 __extension__ extern __inline uint64x2_t
10560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10561 __arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10562 {
10563   return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p);
10564 }
10565 
10566 __extension__ extern __inline uint32x4_t
10567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10568 __arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10569 {
10570   return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p);
10571 }
10572 
10573 __extension__ extern __inline int16x8_t
10574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10575 __arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10576 {
10577   return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p);
10578 }
10579 
10580 __extension__ extern __inline int64x2_t
10581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10582 __arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10583 {
10584   return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p);
10585 }
10586 
10587 __extension__ extern __inline int32x4_t
10588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10589 __arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10590 {
10591   return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p);
10592 }
10593 
10594 __extension__ extern __inline uint16x8_t
10595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10596 __arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10597 {
10598   return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p);
10599 }
10600 
10601 __extension__ extern __inline uint64x2_t
10602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10603 __arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10604 {
10605   return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p);
10606 }
10607 
10608 __extension__ extern __inline uint32x4_t
10609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10610 __arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10611 {
10612   return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
10613 }
10614 
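/* Illustrative sketch (not part of the header): vmullbq_int_m multiplies
   the even-numbered ("bottom") lanes of its arguments and vmulltq_int_m
   the odd-numbered ("top") lanes, each producing results twice as wide,
   so a full widening multiply needs both halves:

     int32x4_t lo = __arm_vmullbq_int_m_s16 (inact_lo, a, b, p);   // lanes 0, 2, 4, 6
     int32x4_t hi = __arm_vmulltq_int_m_s16 (inact_hi, a, b, p);   // lanes 1, 3, 5, 7
*/
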
10615 __extension__ extern __inline int8x16_t
10616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10617 __arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
10618 {
10619   return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
10620 }
10621 
10622 __extension__ extern __inline int32x4_t
10623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10624 __arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
10625 {
10626   return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
10627 }
10628 
10629 __extension__ extern __inline int16x8_t
10630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10631 __arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
10632 {
10633   return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
10634 }
10635 
10636 __extension__ extern __inline uint8x16_t
10637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10638 __arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
10639 {
10640   return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
10641 }
10642 
10643 __extension__ extern __inline uint32x4_t
10644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10645 __arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
10646 {
10647   return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
10648 }
10649 
10650 __extension__ extern __inline uint16x8_t
10651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10652 __arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
10653 {
10654   return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
10655 }
10656 
10657 __extension__ extern __inline int8x16_t
10658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10659 __arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10660 {
10661   return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
10662 }
10663 
10664 __extension__ extern __inline int32x4_t
10665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10666 __arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10667 {
10668   return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
10669 }
10670 
10671 __extension__ extern __inline int16x8_t
10672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10673 __arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10674 {
10675   return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
10676 }
10677 
10678 __extension__ extern __inline uint8x16_t
10679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10680 __arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10681 {
10682   return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
10683 }
10684 
10685 __extension__ extern __inline uint32x4_t
10686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10687 __arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10688 {
10689   return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
10690 }
10691 
10692 __extension__ extern __inline uint16x8_t
10693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10694 __arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10695 {
10696   return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
10697 }
10698 
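/* Illustrative sketch (not part of the header): the _n variants above
   broadcast the scalar __b to every lane before multiplying, so
   __arm_vmulq_m_n_s32 (inact, a, 3, p) behaves like multiplying by a
   vector whose enabled lanes are all 3; the plain vmulq_m forms take
   both operands as full vectors.  */
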
10699 __extension__ extern __inline int8x16_t
10700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10701 __arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10702 {
10703   return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p);
10704 }
10705 
10706 __extension__ extern __inline int32x4_t
10707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10708 __arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10709 {
10710   return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p);
10711 }
10712 
10713 __extension__ extern __inline int16x8_t
10714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10715 __arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10716 {
10717   return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p);
10718 }
10719 
10720 __extension__ extern __inline uint8x16_t
10721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10722 __arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10723 {
10724   return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p);
10725 }
10726 
10727 __extension__ extern __inline uint32x4_t
10728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10729 __arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10730 {
10731   return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p);
10732 }
10733 
10734 __extension__ extern __inline uint16x8_t
10735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10736 __arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10737 {
10738   return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p);
10739 }
10740 
10741 __extension__ extern __inline int8x16_t
10742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10743 __arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10744 {
10745   return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p);
10746 }
10747 
10748 __extension__ extern __inline int32x4_t
10749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10750 __arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10751 {
10752   return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p);
10753 }
10754 
10755 __extension__ extern __inline int16x8_t
10756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10757 __arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10758 {
10759   return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p);
10760 }
10761 
10762 __extension__ extern __inline uint8x16_t
10763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10764 __arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10765 {
10766   return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p);
10767 }
10768 
10769 __extension__ extern __inline uint32x4_t
10770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10771 __arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10772 {
10773   return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p);
10774 }
10775 
10776 __extension__ extern __inline uint16x8_t
10777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10778 __arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10779 {
10780   return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p);
10781 }
10782 
10783 __extension__ extern __inline int8x16_t
10784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10785 __arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
10786 {
10787   return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p);
10788 }
10789 
10790 __extension__ extern __inline int32x4_t
10791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10792 __arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
10793 {
10794   return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p);
10795 }
10796 
10797 __extension__ extern __inline int16x8_t
10798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10799 __arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
10800 {
10801   return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p);
10802 }
10803 
10804 __extension__ extern __inline uint8x16_t
10805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10806 __arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
10807 {
10808   return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p);
10809 }
10810 
10811 __extension__ extern __inline uint32x4_t
10812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10813 __arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
10814 {
10815   return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p);
10816 }
10817 
10818 __extension__ extern __inline uint16x8_t
10819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10820 __arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
10821 {
10822   return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p);
10823 }
10824 
10825 __extension__ extern __inline int8x16_t
10826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10827 __arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10828 {
10829   return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p);
10830 }
10831 
10832 __extension__ extern __inline int32x4_t
10833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10834 __arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10835 {
10836   return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p);
10837 }
10838 
10839 __extension__ extern __inline int16x8_t
10840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10841 __arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10842 {
10843   return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p);
10844 }
10845 
10846 __extension__ extern __inline uint8x16_t
10847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10848 __arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
10849 {
10850   return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p);
10851 }
10852 
10853 __extension__ extern __inline uint32x4_t
10854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10855 __arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
10856 {
10857   return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p);
10858 }
10859 
10860 __extension__ extern __inline uint16x8_t
10861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10862 __arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
10863 {
10864   return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p);
10865 }
10866 
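/* Illustrative sketch (not part of the header): unlike a plain wrapping
   add, the vqaddq_m intrinsics above saturate at the limits of the
   element type, e.g. adding 1 to a lane holding INT8_MAX leaves it at
   127:

     int8x16_t r = __arm_vqaddq_m_n_s8 (v, v, 1, p);   // enabled lanes saturate at 127
*/
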
10867 __extension__ extern __inline int8x16_t
10868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10869 __arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10870 {
10871   return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p);
10872 }
10873 
10874 __extension__ extern __inline int32x4_t
10875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10876 __arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10877 {
10878   return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p);
10879 }
10880 
10881 __extension__ extern __inline int16x8_t
10882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10883 __arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10884 {
10885   return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p);
10886 }
10887 
10888 __extension__ extern __inline int8x16_t
10889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10890 __arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10891 {
10892   return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
10893 }
10894 
10895 __extension__ extern __inline int32x4_t
10896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10897 __arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10898 {
10899   return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p);
10900 }
10901 
10902 __extension__ extern __inline int16x8_t
10903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10904 __arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10905 {
10906   return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
10907 }
10908 
10909 __extension__ extern __inline int8x16_t
10910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10911 __arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
10912 {
10913   return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p);
10914 }
10915 
10916 __extension__ extern __inline int32x4_t
10917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10918 __arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
10919 {
10920   return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p);
10921 }
10922 
10923 __extension__ extern __inline int16x8_t
10924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10925 __arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
10926 {
10927   return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p);
10928 }
10929 
10930 __extension__ extern __inline int8x16_t
10931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10932 __arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10933 {
10934   return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
10935 }
10936 
10937 __extension__ extern __inline int32x4_t
10938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10939 __arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10940 {
10941   return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
10942 }
10943 
10944 __extension__ extern __inline int16x8_t
10945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10946 __arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10947 {
10948   return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
10949 }
10950 
10951 __extension__ extern __inline int8x16_t
10952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10953 __arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10954 {
10955   return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
10956 }
10957 
10958 __extension__ extern __inline int32x4_t
10959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10960 __arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
10961 {
10962   return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
10963 }
10964 
10965 __extension__ extern __inline int16x8_t
10966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10967 __arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
10968 {
10969   return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
10970 }
10971 
10972 __extension__ extern __inline int8x16_t
10973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10974 __arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
10975 {
10976   return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
10977 }
10978 
10979 __extension__ extern __inline int32x4_t
10980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10981 __arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
10982 {
10983   return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
10984 }
10985 
10986 __extension__ extern __inline int16x8_t
10987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10988 __arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
10989 {
10990   return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
10991 }
10992 
10993 __extension__ extern __inline int8x16_t
10994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10995 __arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
10996 {
10997   return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p);
10998 }
10999 
11000 __extension__ extern __inline int32x4_t
11001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11002 __arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11003 {
11004   return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p);
11005 }
11006 
11007 __extension__ extern __inline int16x8_t
11008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11009 __arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11010 {
11011   return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p);
11012 }
11013 
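/* Illustrative sketch (not part of the header): vqdmulhq_m doubles the
   product and keeps its high half with saturation, i.e. each enabled
   lane computes roughly sat ((2 * __a * __b) >> element_width), which is
   the usual Q15/Q31 fixed-point multiply:

     int16x8_t r = __arm_vqdmulhq_m_s16 (inact, a, b, p);   // Q15 multiply per enabled lane
*/
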
11014 __extension__ extern __inline int8x16_t
11015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11016 __arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11017 {
11018   return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p);
11019 }
11020 
11021 __extension__ extern __inline int32x4_t
11022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11023 __arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11024 {
11025   return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p);
11026 }
11027 
11028 __extension__ extern __inline int16x8_t
11029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11030 __arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11031 {
11032   return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p);
11033 }
11034 
11035 __extension__ extern __inline int8x16_t
11036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11037 __arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11038 {
11039   return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
11040 }
11041 
11042 __extension__ extern __inline int32x4_t
11043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11044 __arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11045 {
11046   return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p);
11047 }
11048 
11049 __extension__ extern __inline int16x8_t
11050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11051 __arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11052 {
11053   return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
11054 }
11055 
11056 __extension__ extern __inline int8x16_t
11057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11058 __arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
11059 {
11060   return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p);
11061 }
11062 
11063 __extension__ extern __inline int32x4_t
11064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11065 __arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
11066 {
11067   return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p);
11068 }
11069 
11070 __extension__ extern __inline int16x8_t
11071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11072 __arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
11073 {
11074   return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p);
11075 }
11076 
11077 __extension__ extern __inline int8x16_t
11078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11079 __arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
11080 {
11081   return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p);
11082 }
11083 
11084 __extension__ extern __inline int32x4_t
11085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11086 __arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
11087 {
11088   return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p);
11089 }
11090 
11091 __extension__ extern __inline int16x8_t
11092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11093 __arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
11094 {
11095   return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p);
11096 }
11097 
11098 __extension__ extern __inline int8x16_t
11099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11100 __arm_vqdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
11101 {
11102   return __builtin_mve_vqdmlashq_m_n_sv16qi (__a, __b, __c, __p);
11103 }
11104 
11105 __extension__ extern __inline int16x8_t
11106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11107 __arm_vqdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
11108 {
11109   return __builtin_mve_vqdmlashq_m_n_sv8hi (__a, __b, __c, __p);
11110 }
11111 
11112 __extension__ extern __inline int32x4_t
11113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11114 __arm_vqdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
11115 {
11116   return __builtin_mve_vqdmlashq_m_n_sv4si (__a, __b, __c, __p);
11117 }
11118 
11119 __extension__ extern __inline int8x16_t
11120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11121 __arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11122 {
11123   return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
11124 }
11125 
11126 __extension__ extern __inline int32x4_t
11127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11128 __arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11129 {
11130   return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
11131 }
11132 
11133 __extension__ extern __inline int16x8_t
11134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11135 __arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11136 {
11137   return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
11138 }
11139 
11140 __extension__ extern __inline int8x16_t
11141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11142 __arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11143 {
11144   return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
11145 }
11146 
11147 __extension__ extern __inline int32x4_t
11148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11149 __arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11150 {
11151   return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
11152 }
11153 
11154 __extension__ extern __inline int16x8_t
11155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11156 __arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11157 {
11158   return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
11159 }
11160 
11161 __extension__ extern __inline int8x16_t
11162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11163 __arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11164 {
11165   return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
11166 }
11167 
11168 __extension__ extern __inline int32x4_t
11169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11170 __arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11171 {
11172   return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
11173 }
11174 
11175 __extension__ extern __inline int16x8_t
11176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11177 __arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11178 {
11179   return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
11180 }
11181 
11182 __extension__ extern __inline int8x16_t
11183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11184 __arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11185 {
11186   return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p);
11187 }
11188 
11189 __extension__ extern __inline int32x4_t
11190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11191 __arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11192 {
11193   return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p);
11194 }
11195 
11196 __extension__ extern __inline int16x8_t
11197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11198 __arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11199 {
11200   return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
11201 }
11202 
11203 __extension__ extern __inline int8x16_t
11204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11205 __arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11206 {
11207   return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
11208 }
11209 
11210 __extension__ extern __inline int32x4_t
11211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11212 __arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11213 {
11214   return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
11215 }
11216 
11217 __extension__ extern __inline int16x8_t
11218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11219 __arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11220 {
11221   return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
11222 }
11223 
11224 __extension__ extern __inline uint8x16_t
11225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11226 __arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11227 {
11228   return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
11229 }
11230 
11231 __extension__ extern __inline uint32x4_t
11232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11233 __arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11234 {
11235   return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
11236 }
11237 
11238 __extension__ extern __inline uint16x8_t
11239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11240 __arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11241 {
11242   return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
11243 }
11244 
11245 __extension__ extern __inline int8x16_t
11246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11247 __arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11248 {
11249   return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
11250 }
11251 
11252 __extension__ extern __inline int32x4_t
11253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11254 __arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11255 {
11256   return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p);
11257 }
11258 
11259 __extension__ extern __inline int16x8_t
11260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11261 __arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11262 {
11263   return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
11264 }
11265 
11266 __extension__ extern __inline uint8x16_t
11267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11268 __arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11269 {
11270   return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
11271 }
11272 
11273 __extension__ extern __inline uint32x4_t
11274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11275 __arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11276 {
11277   return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p);
11278 }
11279 
11280 __extension__ extern __inline uint16x8_t
11281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11282 __arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11283 {
11284   return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
11285 }
11286 
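/* Usage sketch (illustrative only): in the "_n" forms above the shift
   count must be an integer constant expression; for each lane enabled by
   the predicate, __a is saturating left-shifted by __imm, while disabled
   lanes take the value of __inactive.  For example:

     uint16x8_t
     scale_by_16_masked (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
     {
       return __arm_vqshlq_m_n_u16 (inactive, a, 4, p);  /* a << 4, saturating */
     }
*/
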
11287 __extension__ extern __inline int8x16_t
11288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11289 __arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11290 {
11291   return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p);
11292 }
11293 
11294 __extension__ extern __inline int32x4_t
11295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11296 __arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11297 {
11298   return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p);
11299 }
11300 
11301 __extension__ extern __inline int16x8_t
11302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11303 __arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11304 {
11305   return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p);
11306 }
11307 
11308 __extension__ extern __inline uint8x16_t
11309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11310 __arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11311 {
11312   return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p);
11313 }
11314 
11315 __extension__ extern __inline uint32x4_t
11316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11317 __arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11318 {
11319   return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p);
11320 }
11321 
11322 __extension__ extern __inline uint16x8_t
11323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11324 __arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11325 {
11326   return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
11327 }
11328 
11329 __extension__ extern __inline int8x16_t
11330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11331 __arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11332 {
11333   return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p);
11334 }
11335 
11336 __extension__ extern __inline int32x4_t
11337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11338 __arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11339 {
11340   return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p);
11341 }
11342 
11343 __extension__ extern __inline int16x8_t
11344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11345 __arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11346 {
11347   return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p);
11348 }
11349 
11350 __extension__ extern __inline uint8x16_t
11351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11352 __arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
11353 {
11354   return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p);
11355 }
11356 
11357 __extension__ extern __inline uint32x4_t
11358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11359 __arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
11360 {
11361   return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p);
11362 }
11363 
11364 __extension__ extern __inline uint16x8_t
11365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11366 __arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
11367 {
11368   return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p);
11369 }
11370 
11371 __extension__ extern __inline int8x16_t
11372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11373 __arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11374 {
11375   return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p);
11376 }
11377 
11378 __extension__ extern __inline int32x4_t
11379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11380 __arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11381 {
11382   return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p);
11383 }
11384 
11385 __extension__ extern __inline int16x8_t
11386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11387 __arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11388 {
11389   return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p);
11390 }
11391 
11392 __extension__ extern __inline uint8x16_t
11393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11394 __arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11395 {
11396   return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p);
11397 }
11398 
11399 __extension__ extern __inline uint32x4_t
11400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11401 __arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11402 {
11403   return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p);
11404 }
11405 
11406 __extension__ extern __inline uint16x8_t
11407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11408 __arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11409 {
11410   return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p);
11411 }
11412 
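/* Usage sketch (illustrative only): in the "_n" variants above the last
   data operand is a scalar that is subtracted from every enabled lane
   with saturation; disabled lanes are taken from __inactive.  E.g.:

     uint8x16_t
     sub_bias_masked (uint8x16_t inactive, uint8x16_t a, uint8_t bias,
                      mve_pred16_t p)
     {
       return __arm_vqsubq_m_n_u8 (inactive, a, bias, p);
     }
*/
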
11413 __extension__ extern __inline int8x16_t
11414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11415 __arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11416 {
11417   return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p);
11418 }
11419 
11420 __extension__ extern __inline int32x4_t
11421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11422 __arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11423 {
11424   return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p);
11425 }
11426 
11427 __extension__ extern __inline int16x8_t
11428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11429 __arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11430 {
11431   return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p);
11432 }
11433 
11434 __extension__ extern __inline uint8x16_t
11435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11436 __arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11437 {
11438   return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p);
11439 }
11440 
11441 __extension__ extern __inline uint32x4_t
11442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11443 __arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11444 {
11445   return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p);
11446 }
11447 
11448 __extension__ extern __inline uint16x8_t
11449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11450 __arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11451 {
11452   return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p);
11453 }
11454 
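/* Usage sketch (illustrative only): vrhaddq computes a rounding halving
   add, roughly (a + b + 1) >> 1 per lane without intermediate overflow;
   the "_m" form merges disabled lanes from __inactive.  E.g.:

     uint8x16_t
     average_masked (uint8x16_t inactive, uint8x16_t a, uint8x16_t b,
                     mve_pred16_t p)
     {
       return __arm_vrhaddq_m_u8 (inactive, a, b, p);
     }
*/
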
11455 __extension__ extern __inline int8x16_t
11456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11457 __arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11458 {
11459   return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p);
11460 }
11461 
11462 __extension__ extern __inline int32x4_t
11463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11464 __arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11465 {
11466   return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p);
11467 }
11468 
11469 __extension__ extern __inline int16x8_t
11470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11471 __arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11472 {
11473   return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p);
11474 }
11475 
11476 __extension__ extern __inline uint8x16_t
11477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11478 __arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11479 {
11480   return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p);
11481 }
11482 
11483 __extension__ extern __inline uint32x4_t
11484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11485 __arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11486 {
11487   return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p);
11488 }
11489 
11490 __extension__ extern __inline uint16x8_t
11491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11492 __arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11493 {
11494   return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p);
11495 }
11496 
11497 __extension__ extern __inline int8x16_t
11498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11499 __arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11500 {
11501   return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
11502 }
11503 
11504 __extension__ extern __inline int32x4_t
11505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11506 __arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11507 {
11508   return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
11509 }
11510 
11511 __extension__ extern __inline int16x8_t
11512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11513 __arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11514 {
11515   return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
11516 }
11517 
11518 __extension__ extern __inline uint8x16_t
11519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11520 __arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11521 {
11522   return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
11523 }
11524 
11525 __extension__ extern __inline uint32x4_t
11526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11527 __arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11528 {
11529   return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
11530 }
11531 
11532 __extension__ extern __inline uint16x8_t
11533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11534 __arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11535 {
11536   return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
11537 }
11538 
11539 __extension__ extern __inline int8x16_t
11540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11541 __arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11542 {
11543   return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
11544 }
11545 
11546 __extension__ extern __inline int32x4_t
11547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11548 __arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11549 {
11550   return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p);
11551 }
11552 
11553 __extension__ extern __inline int16x8_t
11554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11555 __arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11556 {
11557   return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
11558 }
11559 
11560 __extension__ extern __inline uint8x16_t
11561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11562 __arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11563 {
11564   return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
11565 }
11566 
11567 __extension__ extern __inline uint32x4_t
11568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11569 __arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11570 {
11571   return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p);
11572 }
11573 
11574 __extension__ extern __inline uint16x8_t
11575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11576 __arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11577 {
11578   return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
11579 }
11580 
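/* Usage sketch (illustrative only): vrshrq shifts each lane right by an
   integer constant and rounds the result (it adds half an LSB before the
   shift); the predicated form keeps __inactive in disabled lanes:

     int16x8_t
     round_shift_masked (int16x8_t inactive, int16x8_t a, mve_pred16_t p)
     {
       return __arm_vrshrq_m_n_s16 (inactive, a, 3, p);  /* (a + 4) >> 3 per lane */
     }
*/
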
11581 __extension__ extern __inline int8x16_t
11582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11583 __arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11584 {
11585   return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
11586 }
11587 
11588 __extension__ extern __inline int32x4_t
11589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11590 __arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11591 {
11592   return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p);
11593 }
11594 
11595 __extension__ extern __inline int16x8_t
11596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11597 __arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11598 {
11599   return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
11600 }
11601 
11602 __extension__ extern __inline uint8x16_t
11603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11604 __arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11605 {
11606   return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
11607 }
11608 
11609 __extension__ extern __inline uint32x4_t
11610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11611 __arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11612 {
11613   return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p);
11614 }
11615 
11616 __extension__ extern __inline uint16x8_t
11617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11618 __arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11619 {
11620   return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
11621 }
11622 
11623 __extension__ extern __inline int8x16_t
11624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11625 __arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11626 {
11627   return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
11628 }
11629 
11630 __extension__ extern __inline int32x4_t
11631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11632 __arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11633 {
11634   return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p);
11635 }
11636 
11637 __extension__ extern __inline int16x8_t
11638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11639 __arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11640 {
11641   return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
11642 }
11643 
11644 __extension__ extern __inline uint8x16_t
11645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11646 __arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11647 {
11648   return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
11649 }
11650 
11651 __extension__ extern __inline uint32x4_t
11652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11653 __arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11654 {
11655   return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p);
11656 }
11657 
11658 __extension__ extern __inline uint16x8_t
11659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11660 __arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11661 {
11662   return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
11663 }
11664 
11665 __extension__ extern __inline int8x16_t
11666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11667 __arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
11668 {
11669   return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p);
11670 }
11671 
11672 __extension__ extern __inline int32x4_t
11673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11674 __arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11675 {
11676   return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p);
11677 }
11678 
11679 __extension__ extern __inline int16x8_t
11680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11681 __arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11682 {
11683   return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p);
11684 }
11685 
11686 __extension__ extern __inline uint8x16_t
11687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11688 __arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
11689 {
11690   return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p);
11691 }
11692 
11693 __extension__ extern __inline uint32x4_t
11694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11695 __arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11696 {
11697   return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p);
11698 }
11699 
11700 __extension__ extern __inline uint16x8_t
11701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11702 __arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11703 {
11704   return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
11705 }
11706 
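/* Usage sketch (illustrative only): vsliq performs a shift-left-insert:
   each enabled lane of __a receives (__b << __imm) merged with the low
   __imm bits that lane of __a already held; lanes disabled by the
   predicate are left untouched.  One possible use is packing bit-fields:

     uint16x8_t
     pack_fields_masked (uint16x8_t low4, uint16x8_t high, mve_pred16_t p)
     {
       return __arm_vsliq_m_n_u16 (low4, high, 4, p);
     }
*/
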
11707 __extension__ extern __inline int8x16_t
11708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11709 __arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11710 {
11711   return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
11712 }
11713 
11714 __extension__ extern __inline int32x4_t
11715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11716 __arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11717 {
11718   return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
11719 }
11720 
11721 __extension__ extern __inline int16x8_t
11722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11723 __arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11724 {
11725   return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
11726 }
11727 
11728 __extension__ extern __inline uint8x16_t
11729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11730 __arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
11731 {
11732   return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
11733 }
11734 
11735 __extension__ extern __inline uint32x4_t
11736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11737 __arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
11738 {
11739   return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
11740 }
11741 
11742 __extension__ extern __inline uint16x8_t
11743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11744 __arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
11745 {
11746   return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
11747 }
11748 
11749 __extension__ extern __inline int64_t
11750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11751 __arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11752 {
11753   return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
11754 }
11755 
11756 __extension__ extern __inline int64_t
11757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11758 __arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11759 {
11760   return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
11761 }
11762 
11763 __extension__ extern __inline uint64_t
11764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11765 __arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
11766 {
11767   return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
11768 }
11769 
11770 __extension__ extern __inline uint64_t
11771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11772 __arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
11773 {
11774   return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
11775 }
11776 
11777 __extension__ extern __inline int64_t
11778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11779 __arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11780 {
11781   return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p);
11782 }
11783 
11784 __extension__ extern __inline int64_t
11785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11786 __arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11787 {
11788   return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p);
11789 }
11790 
11791 __extension__ extern __inline int64_t
11792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11793 __arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11794 {
11795   return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p);
11796 }
11797 
11798 __extension__ extern __inline int64_t
11799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11800 __arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11801 {
11802   return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p);
11803 }
11804 
11805 __extension__ extern __inline int64_t
11806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11807 __arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11808 {
11809   return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p);
11810 }
11811 
11812 __extension__ extern __inline int64_t
11813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11814 __arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11815 {
11816   return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p);
11817 }
11818 
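/* Usage sketch (illustrative only): the "_p" reductions above accumulate
   across the vector: vmlaldavaq adds the products of corresponding lanes
   of __b and __c to the 64-bit accumulator __a, considering only lanes
   enabled by the predicate.  A predicated dot-product tail might look like:

     int64_t
     dot_tail (int64_t acc, int16x8_t b, int16x8_t c, mve_pred16_t p)
     {
       return __arm_vmlaldavaq_p_s16 (acc, b, c, p);
     }
*/
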
11819 __extension__ extern __inline uint16x8_t
11820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11821 __arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11822 {
11823   return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p);
11824 }
11825 
11826 __extension__ extern __inline uint32x4_t
11827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11828 __arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11829 {
11830   return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p);
11831 }
11832 
11833 __extension__ extern __inline uint16x8_t
11834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11835 __arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11836 {
11837   return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p);
11838 }
11839 
11840 __extension__ extern __inline uint32x4_t
11841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11842 __arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11843 {
11844   return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p);
11845 }
11846 
11847 __extension__ extern __inline int64x2_t
11848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11849 __arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11850 {
11851   return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p);
11852 }
11853 
11854 __extension__ extern __inline int32x4_t
11855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11856 __arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11857 {
11858   return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p);
11859 }
11860 
11861 __extension__ extern __inline int64x2_t
11862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11863 __arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11864 {
11865   return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p);
11866 }
11867 
11868 __extension__ extern __inline int32x4_t
11869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11870 __arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11871 {
11872   return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p);
11873 }
11874 
11875 __extension__ extern __inline int64x2_t
11876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11877 __arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11878 {
11879   return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p);
11880 }
11881 
11882 __extension__ extern __inline int32x4_t
11883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11884 __arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11885 {
11886   return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p);
11887 }
11888 
11889 __extension__ extern __inline int64x2_t
11890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11891 __arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11892 {
11893   return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p);
11894 }
11895 
11896 __extension__ extern __inline int32x4_t
11897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11898 __arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11899 {
11900   return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p);
11901 }
11902 
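/* Usage sketch (illustrative only): the widening forms above operate on
   half of the input lanes: vqdmullbq multiplies the even-numbered
   ("bottom") lanes, doubles and saturates, producing double-width
   results, while vqdmulltq does the same for the odd-numbered ("top")
   lanes; the "_m" variants take disabled result lanes from __inactive.
   E.g.:

     int32x4_t
     q15_widen_bottom (int32x4_t inactive, int16x8_t a, int16x8_t b,
                       mve_pred16_t p)
     {
       return __arm_vqdmullbq_m_s16 (inactive, a, b, p);
     }
*/
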
11903 __extension__ extern __inline int16x8_t
11904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11905 __arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11906 {
11907   return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
11908 }
11909 
11910 __extension__ extern __inline int8x16_t
11911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11912 __arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11913 {
11914   return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
11915 }
11916 
11917 __extension__ extern __inline uint16x8_t
11918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11919 __arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11920 {
11921   return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
11922 }
11923 
11924 __extension__ extern __inline uint8x16_t
11925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11926 __arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11927 {
11928   return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
11929 }
11930 
11931 __extension__ extern __inline int16x8_t
11932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11933 __arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11934 {
11935   return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p);
11936 }
11937 
11938 __extension__ extern __inline int8x16_t
11939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11940 __arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11941 {
11942   return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
11943 }
11944 
11945 __extension__ extern __inline uint16x8_t
11946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11947 __arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11948 {
11949   return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p);
11950 }
11951 
11952 __extension__ extern __inline uint8x16_t
11953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11954 __arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11955 {
11956   return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
11957 }
11958 
11959 __extension__ extern __inline uint16x8_t
11960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11961 __arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11962 {
11963   return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p);
11964 }
11965 
11966 __extension__ extern __inline uint8x16_t
11967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11968 __arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11969 {
11970   return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
11971 }
11972 
11973 __extension__ extern __inline uint16x8_t
11974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11975 __arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11976 {
11977   return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p);
11978 }
11979 
11980 __extension__ extern __inline uint8x16_t
11981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11982 __arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11983 {
11984   return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p);
11985 }
11986 
11987 __extension__ extern __inline int16x8_t
11988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11989 __arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11990 {
11991   return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p);
11992 }
11993 
11994 __extension__ extern __inline int8x16_t
11995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11996 __arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11997 {
11998   return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
11999 }
12000 
12001 __extension__ extern __inline uint16x8_t
12002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12003 __arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12004 {
12005   return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12006 }
12007 
12008 __extension__ extern __inline uint8x16_t
12009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12010 __arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12011 {
12012   return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12013 }
12014 
12015 __extension__ extern __inline int16x8_t
12016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12017 __arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12018 {
12019   return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p);
12020 }
12021 
12022 __extension__ extern __inline int8x16_t
12023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12024 __arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12025 {
12026   return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12027 }
12028 
12029 __extension__ extern __inline uint16x8_t
12030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12031 __arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12032 {
12033   return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p);
12034 }
12035 
12036 __extension__ extern __inline uint8x16_t
12037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12038 __arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12039 {
12040   return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12041 }
12042 
12043 __extension__ extern __inline uint16x8_t
12044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12045 __arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12046 {
12047   return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p);
12048 }
12049 
12050 __extension__ extern __inline uint8x16_t
12051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12052 __arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12053 {
12054   return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
12055 }
12056 
12057 __extension__ extern __inline uint16x8_t
12058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12059 __arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12060 {
12061   return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p);
12062 }
12063 
12064 __extension__ extern __inline uint8x16_t
12065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12066 __arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12067 {
12068   return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p);
12069 }
12070 
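/* Usage sketch (illustrative only): the narrowing shifts above shift each
   double-width lane of __b right by a constant, saturate to half width,
   and write the result into either the even-numbered ("nb", bottom) or
   odd-numbered ("nt", top) half-width lanes of __a, leaving the other
   lanes of __a unchanged; the "un" variants (vqshrunbq/vqshruntq) take
   signed input and produce an unsigned, saturated result.  E.g.,
   narrowing predicated Q31 lanes down to Q15:

     int16x8_t
     narrow_bottom (int16x8_t dst, int32x4_t src, mve_pred16_t p)
     {
       return __arm_vqshrnbq_m_n_s32 (dst, src, 16, p);
     }
*/
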
12071 __extension__ extern __inline int64_t
12072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12073 __arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12074 {
12075   return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p);
12076 }
12077 
12078 __extension__ extern __inline uint64_t
12079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12080 __arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
12081 {
12082   return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p);
12083 }
12084 
12085 __extension__ extern __inline int64_t
12086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12087 __arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12088 {
12089   return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p);
12090 }
12091 
12092 __extension__ extern __inline int64_t
12093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12094 __arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12095 {
12096   return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p);
12097 }
12098 
12099 __extension__ extern __inline int64_t
12100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12101 __arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12102 {
12103   return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p);
12104 }
12105 
12106 __extension__ extern __inline int16x8_t
12107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12108 __arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12109 {
12110   return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
12111 }
12112 
12113 __extension__ extern __inline int8x16_t
12114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12115 __arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12116 {
12117   return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
12118 }
12119 
12120 __extension__ extern __inline uint16x8_t
12121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12122 __arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12123 {
12124   return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12125 }
12126 
12127 __extension__ extern __inline uint8x16_t
12128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12129 __arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12130 {
12131   return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12132 }
12133 
12134 __extension__ extern __inline int16x8_t
12135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12136 __arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12137 {
12138   return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p);
12139 }
12140 
12141 __extension__ extern __inline int8x16_t
12142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12143 __arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12144 {
12145   return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12146 }
12147 
12148 __extension__ extern __inline uint16x8_t
12149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12150 __arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12151 {
12152   return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p);
12153 }
12154 
12155 __extension__ extern __inline uint8x16_t
12156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12157 __arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12158 {
12159   return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12160 }
12161 
12162 __extension__ extern __inline int16x8_t
12163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12164 __arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
12165 {
12166   return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p);
12167 }
12168 
12169 __extension__ extern __inline int32x4_t
12170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12171 __arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
12172 {
12173   return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p);
12174 }
12175 
12176 __extension__ extern __inline uint16x8_t
12177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12178 __arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
12179 {
12180   return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p);
12181 }
12182 
12183 __extension__ extern __inline uint32x4_t
12184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12185 __arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
12186 {
12187   return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p);
12188 }
12189 
12190 __extension__ extern __inline int16x8_t
12191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12192 __arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
12193 {
12194   return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p);
12195 }
12196 
12197 __extension__ extern __inline int32x4_t
12198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12199 __arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
12200 {
12201   return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p);
12202 }
12203 
12204 __extension__ extern __inline uint16x8_t
12205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12206 __arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
12207 {
12208   return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p);
12209 }
12210 
12211 __extension__ extern __inline uint32x4_t
12212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12213 __arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
12214 {
12215   return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p);
12216 }
12217 
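/* Usage sketch (illustrative only): vshllbq/vshlltq widen the
   even-numbered ("b", bottom) or odd-numbered ("t", top) input lanes to
   double width and shift them left by an integer constant; the "_m"
   forms take the disabled result lanes from __inactive.  E.g., promoting
   the even s8 lanes to s16 with a gain of 2^3:

     int16x8_t
     widen_even_lanes (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
     {
       return __arm_vshllbq_m_n_s8 (inactive, a, 3, p);
     }
*/
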
12218 __extension__ extern __inline int16x8_t
12219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12220 __arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12221 {
12222   return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p);
12223 }
12224 
12225 __extension__ extern __inline int8x16_t
12226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12227 __arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12228 {
12229   return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
12230 }
12231 
12232 __extension__ extern __inline uint16x8_t
12233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12234 __arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12235 {
12236   return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12237 }
12238 
12239 __extension__ extern __inline uint8x16_t
12240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12241 __arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12242 {
12243   return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12244 }
12245 
12246 __extension__ extern __inline int16x8_t
12247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12248 __arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12249 {
12250   return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p);
12251 }
12252 
12253 __extension__ extern __inline int8x16_t
12254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12255 __arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12256 {
12257   return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12258 }
12259 
12260 __extension__ extern __inline uint16x8_t
12261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12262 __arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12263 {
12264   return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p);
12265 }
12266 
12267 __extension__ extern __inline uint8x16_t
12268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12269 __arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12270 {
12271   return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12272 }
12273 
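/* Byte scatter stores: the least-significant byte of each element of
   __value is stored at __base plus the matching lane of __offset.  */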
12274 __extension__ extern __inline void
12275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12276 __arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
12277 {
12278   __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value);
12279 }
12280 
12281 __extension__ extern __inline void
12282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12283 __arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
12284 {
12285   __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value);
12286 }
12287 
12288 __extension__ extern __inline void
12289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12290 __arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
12291 {
12292   __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value);
12293 }
12294 
12295 __extension__ extern __inline void
12296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12297 __arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
12298 {
12299   __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value);
12300 }
12301 
12302 __extension__ extern __inline void
12303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12304 __arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
12305 {
12306   __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value);
12307 }
12308 
12309 __extension__ extern __inline void
12310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12311 __arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
12312 {
12313   __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value);
12314 }
12315 
12316 __extension__ extern __inline void
12317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12318 __arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value)
12319 {
12320   __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value);
12321 }
12322 
12323 __extension__ extern __inline void
12324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12325 __arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value)
12326 {
12327   __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value);
12328 }
12329 
12330 __extension__ extern __inline void
12331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12332 __arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value)
12333 {
12334   __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value);
12335 }
12336 
12337 __extension__ extern __inline void
12338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12339 __arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value)
12340 {
12341   __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value);
12342 }
12343 
12344 __extension__ extern __inline void
12345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12346 __arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value)
12347 {
12348   __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value);
12349 }
12350 
12351 __extension__ extern __inline void
12352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12353 __arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value)
12354 {
12355   __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value);
12356 }
12357 
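/* Word scatter stores to a vector of addresses: each word of __value is
   stored at __addr[lane] plus the immediate byte offset __offset.  */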
12358 __extension__ extern __inline void
12359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12360 __arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value)
12361 {
12362   __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value);
12363 }
12364 
12365 __extension__ extern __inline void
12366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12367 __arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value)
12368 {
12369   __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value);
12370 }
12371 
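/* Widening byte loads: the _gather_offset forms fetch bytes from __base plus
   per-lane offsets, the plain forms load consecutive bytes; each byte is
   sign- or zero-extended to the element width.  */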
12372 __extension__ extern __inline uint8x16_t
12373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12374 __arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset)
12375 {
12376   return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset);
12377 }
12378 
12379 __extension__ extern __inline int8x16_t
12380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12381 __arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset)
12382 {
12383   return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset);
12384 }
12385 
12386 __extension__ extern __inline int8x16_t
12387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12388 __arm_vldrbq_s8 (int8_t const * __base)
12389 {
12390   return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base);
12391 }
12392 
12393 __extension__ extern __inline uint8x16_t
12394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12395 __arm_vldrbq_u8 (uint8_t const * __base)
12396 {
12397   return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base);
12398 }
12399 
12400 __extension__ extern __inline uint16x8_t
12401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12402 __arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset)
12403 {
12404   return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset);
12405 }
12406 
12407 __extension__ extern __inline int16x8_t
12408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12409 __arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset)
12410 {
12411   return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset);
12412 }
12413 
12414 __extension__ extern __inline int16x8_t
12415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12416 __arm_vldrbq_s16 (int8_t const * __base)
12417 {
12418   return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base);
12419 }
12420 
12421 __extension__ extern __inline uint16x8_t
12422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12423 __arm_vldrbq_u16 (uint8_t const * __base)
12424 {
12425   return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base);
12426 }
12427 
12428 __extension__ extern __inline uint32x4_t
12429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12430 __arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset)
12431 {
12432   return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset);
12433 }
12434 
12435 __extension__ extern __inline int32x4_t
12436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12437 __arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset)
12438 {
12439   return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset);
12440 }
12441 
12442 __extension__ extern __inline int32x4_t
12443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12444 __arm_vldrbq_s32 (int8_t const * __base)
12445 {
12446   return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base);
12447 }
12448 
12449 __extension__ extern __inline uint32x4_t
12450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12451 __arm_vldrbq_u32 (uint8_t const * __base)
12452 {
12453   return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base);
12454 }
12455 
12456 __extension__ extern __inline int32x4_t
12457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12458 __arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset)
12459 {
12460   return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset);
12461 }
12462 
12463 __extension__ extern __inline uint32x4_t
12464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12465 __arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset)
12466 {
12467   return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset);
12468 }
12469 
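/* Predicated (_p) stores: lanes disabled by the predicate __p are left
   unwritten.  */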
12470 __extension__ extern __inline void
12471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12472 __arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
12473 {
12474   __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p);
12475 }
12476 
12477 __extension__ extern __inline void
12478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12479 __arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
12480 {
12481   __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p);
12482 }
12483 
12484 __extension__ extern __inline void
12485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12486 __arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
12487 {
12488   __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p);
12489 }
12490 
12491 __extension__ extern __inline void
12492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12493 __arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
12494 {
12495   __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p);
12496 }
12497 
12498 __extension__ extern __inline void
12499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12500 __arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
12501 {
12502   __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p);
12503 }
12504 
12505 __extension__ extern __inline void
12506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12507 __arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
12508 {
12509   __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p);
12510 }
12511 
12512 __extension__ extern __inline void
12513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12514 __arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
12515 {
12516   __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12517 }
12518 
12519 __extension__ extern __inline void
12520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12521 __arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
12522 {
12523   __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
12524 }
12525 
12526 __extension__ extern __inline void
12527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12528 __arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
12529 {
12530   __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12531 }
12532 
12533 __extension__ extern __inline void
12534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12535 __arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
12536 {
12537   __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12538 }
12539 
12540 __extension__ extern __inline void
12541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12542 __arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
12543 {
12544   __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
12545 }
12546 
12547 __extension__ extern __inline void
12548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12549 __arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
12550 {
12551   __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12552 }
12553 
12554 __extension__ extern __inline void
12555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12556 __arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
12557 {
12558   __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p);
12559 }
12560 
12561 __extension__ extern __inline void
12562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12563 __arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
12564 {
12565   __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p);
12566 }
12567 
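/* Predicated (_z) loads: lanes disabled by the predicate __p are zeroed
   instead of loaded.  */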
12568 __extension__ extern __inline int8x16_t
12569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12570 __arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
12571 {
12572   return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p);
12573 }
12574 
12575 __extension__ extern __inline int32x4_t
12576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12577 __arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12578 {
12579   return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p);
12580 }
12581 
12582 __extension__ extern __inline int16x8_t
12583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12584 __arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12585 {
12586   return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p);
12587 }
12588 
12589 __extension__ extern __inline uint8x16_t
12590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12591 __arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
12592 {
12593   return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p);
12594 }
12595 
12596 __extension__ extern __inline uint32x4_t
12597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12598 __arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12599 {
12600   return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p);
12601 }
12602 
12603 __extension__ extern __inline uint16x8_t
12604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12605 __arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12606 {
12607   return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p);
12608 }
12609 
12610 __extension__ extern __inline int8x16_t
12611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12612 __arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p)
12613 {
12614   return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p);
12615 }
12616 
12617 __extension__ extern __inline int32x4_t
12618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12619 __arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p)
12620 {
12621   return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p);
12622 }
12623 
12624 __extension__ extern __inline int16x8_t
12625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12626 __arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p)
12627 {
12628   return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p);
12629 }
12630 
12631 __extension__ extern __inline uint8x16_t
12632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12633 __arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p)
12634 {
12635   return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p);
12636 }
12637 
12638 __extension__ extern __inline uint32x4_t
12639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12640 __arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p)
12641 {
12642   return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p);
12643 }
12644 
12645 __extension__ extern __inline uint16x8_t
12646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12647 __arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p)
12648 {
12649   return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p);
12650 }
12651 
12652 __extension__ extern __inline int32x4_t
12653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12654 __arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
12655 {
12656   return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p);
12657 }
12658 
12659 __extension__ extern __inline uint32x4_t
12660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12661 __arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
12662 {
12663   return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p);
12664 }
12665 
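/* vld1q: contiguous loads of a full 128-bit vector from __base.  */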
12666 __extension__ extern __inline int8x16_t
12667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12668 __arm_vld1q_s8 (int8_t const * __base)
12669 {
12670   return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base);
12671 }
12672 
12673 __extension__ extern __inline int32x4_t
12674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12675 __arm_vld1q_s32 (int32_t const * __base)
12676 {
12677   return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base);
12678 }
12679 
12680 __extension__ extern __inline int16x8_t
12681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12682 __arm_vld1q_s16 (int16_t const * __base)
12683 {
12684   return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base);
12685 }
12686 
12687 __extension__ extern __inline uint8x16_t
12688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12689 __arm_vld1q_u8 (uint8_t const * __base)
12690 {
12691   return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base);
12692 }
12693 
12694 __extension__ extern __inline uint32x4_t
12695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12696 __arm_vld1q_u32 (uint32_t const * __base)
12697 {
12698   return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base);
12699 }
12700 
12701 __extension__ extern __inline uint16x8_t
12702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12703 __arm_vld1q_u16 (uint16_t const * __base)
12704 {
12705   return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base);
12706 }
12707 
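/* Halfword gather loads; the _shifted_offset forms scale each lane offset by
   the element size (a left shift by one) before adding it to __base.  */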
12708 __extension__ extern __inline int32x4_t
12709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12710 __arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset)
12711 {
12712   return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
12713 }
12714 
12715 __extension__ extern __inline int16x8_t
12716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12717 __arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset)
12718 {
12719   return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
12720 }
12721 
12722 __extension__ extern __inline uint32x4_t
12723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12724 __arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
12725 {
12726   return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
12727 }
12728 
12729 __extension__ extern __inline uint16x8_t
12730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12731 __arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
12732 {
12733   return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
12734 }
12735 
12736 __extension__ extern __inline int32x4_t
12737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12738 __arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12739 {
12740   return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
12741 }
12742 
12743 __extension__ extern __inline int16x8_t
12744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12745 __arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12746 {
12747   return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
12748 }
12749 
12750 __extension__ extern __inline uint32x4_t
12751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12752 __arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12753 {
12754   return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
12755 }
12756 
12757 __extension__ extern __inline uint16x8_t
12758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12759 __arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12760 {
12761   return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
12762 }
12763 
12764 __extension__ extern __inline int32x4_t
12765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12766 __arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset)
12767 {
12768   return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
12769 }
12770 
12771 __extension__ extern __inline int16x8_t
12772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12773 __arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset)
12774 {
12775   return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
12776 }
12777 
12778 __extension__ extern __inline uint32x4_t
12779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12780 __arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
12781 {
12782   return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
12783 }
12784 
12785 __extension__ extern __inline uint16x8_t
12786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12787 __arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
12788 {
12789   return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
12790 }
12791 
12792 __extension__ extern __inline int32x4_t
12793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12794 __arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12795 {
12796   return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
12797 }
12798 
12799 __extension__ extern __inline int16x8_t
12800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12801 __arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12802 {
12803   return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
12804 }
12805 
12806 __extension__ extern __inline uint32x4_t
12807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12808 __arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
12809 {
12810   return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
12811 }
12812 
12813 __extension__ extern __inline uint16x8_t
12814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12815 __arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
12816 {
12817   return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
12818 }
12819 
12820 __extension__ extern __inline int32x4_t
12821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12822 __arm_vldrhq_s32 (int16_t const * __base)
12823 {
12824   return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base);
12825 }
12826 
12827 __extension__ extern __inline int16x8_t
12828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12829 __arm_vldrhq_s16 (int16_t const * __base)
12830 {
12831   return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base);
12832 }
12833 
12834 __extension__ extern __inline uint32x4_t
12835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12836 __arm_vldrhq_u32 (uint16_t const * __base)
12837 {
12838   return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base);
12839 }
12840 
12841 __extension__ extern __inline uint16x8_t
12842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12843 __arm_vldrhq_u16 (uint16_t const * __base)
12844 {
12845   return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base);
12846 }
12847 
12848 __extension__ extern __inline int32x4_t
12849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12850 __arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p)
12851 {
12852   return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p);
12853 }
12854 
12855 __extension__ extern __inline int16x8_t
12856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12857 __arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p)
12858 {
12859   return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p);
12860 }
12861 
12862 __extension__ extern __inline uint32x4_t
12863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12864 __arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p)
12865 {
12866   return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p);
12867 }
12868 
12869 __extension__ extern __inline uint16x8_t
12870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12871 __arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p)
12872 {
12873   return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p);
12874 }
12875 
12876 __extension__ extern __inline int32x4_t
12877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12878 __arm_vldrwq_s32 (int32_t const * __base)
12879 {
12880   return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base);
12881 }
12882 
12883 __extension__ extern __inline uint32x4_t
12884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12885 __arm_vldrwq_u32 (uint32_t const * __base)
12886 {
12887   return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base);
12888 }
12889 
12890 
12891 __extension__ extern __inline int32x4_t
12892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12893 __arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p)
12894 {
12895   return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p);
12896 }
12897 
12898 __extension__ extern __inline uint32x4_t
12899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12900 __arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p)
12901 {
12902   return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p);
12903 }
12904 
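/* 64-bit gather loads: the _gather_base forms read from __addr[lane] plus an
   immediate offset, the _gather_offset forms from __base plus per-lane
   offsets (scaled by eight in the _shifted_offset forms).  */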
12905 __extension__ extern __inline int64x2_t
12906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12907 __arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
12908 {
12909   return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset);
12910 }
12911 
12912 __extension__ extern __inline uint64x2_t
12913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12914 __arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset)
12915 {
12916   return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset);
12917 }
12918 
12919 __extension__ extern __inline int64x2_t
12920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12921 __arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
12922 {
12923   return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p);
12924 }
12925 
12926 __extension__ extern __inline uint64x2_t
12927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12928 __arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
12929 {
12930   return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
12931 }
12932 
12933 __extension__ extern __inline int64x2_t
12934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12935 __arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset)
12936 {
12937   return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset);
12938 }
12939 
12940 __extension__ extern __inline uint64x2_t
12941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12942 __arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
12943 {
12944   return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset);
12945 }
12946 
12947 __extension__ extern __inline int64x2_t
12948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12949 __arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
12950 {
12951   return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
12952 }
12953 
12954 
12955 __extension__ extern __inline uint64x2_t
12956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12957 __arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
12958 {
12959   return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
12960 }
12961 
12962 __extension__ extern __inline int64x2_t
12963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12964 __arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
12965 {
12966   return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
12967 }
12968 
12969 __extension__ extern __inline uint64x2_t
12970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12971 __arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
12972 {
12973   return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
12974 }
12975 
12976 __extension__ extern __inline int64x2_t
12977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12978 __arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
12979 {
12980   return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
12981 }
12982 
12983 __extension__ extern __inline uint64x2_t
12984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12985 __arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
12986 {
12987   return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
12988 }
12989 
12990 __extension__ extern __inline int32x4_t
12991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12992 __arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset)
12993 {
12994   return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset);
12995 }
12996 
12997 __extension__ extern __inline uint32x4_t
12998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12999 __arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
13000 {
13001   return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset);
13002 }
13003 
13004 __extension__ extern __inline int32x4_t
13005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13006 __arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
13007 {
13008   return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
13009 }
13010 
13011 __extension__ extern __inline uint32x4_t
13012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13013 __arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
13014 {
13015   return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
13016 }
13017 
13018 __extension__ extern __inline int32x4_t
13019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13020 __arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
13021 {
13022   return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
13023 }
13024 
13025 __extension__ extern __inline uint32x4_t
13026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13027 __arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
13028 {
13029   return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
13030 }
13031 
13032 __extension__ extern __inline int32x4_t
13033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13034 __arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
13035 {
13036   return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
13037 }
13038 
13039 __extension__ extern __inline uint32x4_t
13040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13041 __arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
13042 {
13043   return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
13044 }
13045 
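/* vst1q: contiguous stores of a full 128-bit vector to __addr.  */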
13046 __extension__ extern __inline void
13047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13048 __arm_vst1q_s8 (int8_t * __addr, int8x16_t __value)
13049 {
13050   __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value);
13051 }
13052 
13053 __extension__ extern __inline void
13054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13055 __arm_vst1q_s32 (int32_t * __addr, int32x4_t __value)
13056 {
13057   __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value);
13058 }
13059 
13060 __extension__ extern __inline void
13061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13062 __arm_vst1q_s16 (int16_t * __addr, int16x8_t __value)
13063 {
13064   __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value);
13065 }
13066 
13067 __extension__ extern __inline void
13068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13069 __arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value)
13070 {
13071   __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value);
13072 }
13073 
13074 __extension__ extern __inline void
13075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13076 __arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value)
13077 {
13078   __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value);
13079 }
13080 
13081 __extension__ extern __inline void
13082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13083 __arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value)
13084 {
13085   __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value);
13086 }
13087 
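/* Halfword scatter stores; the _shifted_offset forms scale the lane offsets
   by two, and the _p forms skip lanes disabled by __p.  */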
13088 __extension__ extern __inline void
13089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13090 __arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
13091 {
13092   __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
13093 }
13094 
13095 __extension__ extern __inline void
13096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13097 __arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
13098 {
13099   __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
13100 }
13101 
13102 __extension__ extern __inline void
13103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13104 __arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
13105 {
13106   __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
13107 }
13108 
13109 __extension__ extern __inline void
13110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13111 __arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
13112 {
13113   __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
13114 }
13115 
13116 __extension__ extern __inline void
13117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13118 __arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
13119 {
13120   __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
13121 }
13122 
13123 __extension__ extern __inline void
13124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13125 __arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
13126 {
13127   __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
13128 }
13129 
13130 __extension__ extern __inline void
13131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13132 __arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
13133 {
13134   __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
13135 }
13136 
13137 __extension__ extern __inline void
13138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13139 __arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
13140 {
13141   __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
13142 }
13143 
13144 __extension__ extern __inline void
13145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13146 __arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
13147 {
13148   __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
13149 }
13150 
13151 __extension__ extern __inline void
13152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13153 __arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
13154 {
13155   __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
13156 }
13157 
13158 __extension__ extern __inline void
13159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13160 __arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
13161 {
13162   __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
13163 }
13164 
13165 __extension__ extern __inline void
13166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13167 __arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
13168 {
13169   __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
13170 }
13171 
13172 __extension__ extern __inline void
13173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13174 __arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
13175 {
13176   __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
13177 }
13178 
13179 __extension__ extern __inline void
13180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13181 __arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
13182 {
13183   __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
13184 }
13185 
13186 __extension__ extern __inline void
13187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13188 __arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
13189 {
13190   __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
13191 }
13192 
13193 __extension__ extern __inline void
13194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13195 __arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
13196 {
13197   __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
13198 }
13199 
13200 __extension__ extern __inline void
13201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13202 __arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value)
13203 {
13204   __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value);
13205 }
13206 
13207 __extension__ extern __inline void
13208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13209 __arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value)
13210 {
13211   __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value);
13212 }
13213 
13214 __extension__ extern __inline void
13215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13216 __arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value)
13217 {
13218   __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value);
13219 }
13220 
13221 __extension__ extern __inline void
13222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13223 __arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value)
13224 {
13225   __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value);
13226 }
13227 
13228 __extension__ extern __inline void
13229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13230 __arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
13231 {
13232   __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p);
13233 }
13234 
13235 __extension__ extern __inline void
13236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13237 __arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
13238 {
13239   __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p);
13240 }
13241 
13242 __extension__ extern __inline void
13243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13244 __arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
13245 {
13246   __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p);
13247 }
13248 
13249 __extension__ extern __inline void
13250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13251 __arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
13252 {
13253   __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p);
13254 }
13255 
13256 __extension__ extern __inline void
13257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13258 __arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value)
13259 {
13260   __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value);
13261 }
13262 
13263 __extension__ extern __inline void
13264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13265 __arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value)
13266 {
13267   __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value);
13268 }
13269 
13270 __extension__ extern __inline void
13271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13272 __arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
13273 {
13274   __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p);
13275 }
13276 
13277 __extension__ extern __inline void
13278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13279 __arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
13280 {
13281   __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p);
13282 }
13283 
13284 __extension__ extern __inline void
13285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13286 __arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
13287 {
13288   __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p);
13289 }
13290 
13291 __extension__ extern __inline void
13292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13293 __arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
13294 {
13295   __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p);
13296 }
13297 
13298 __extension__ extern __inline void
13299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13300 __arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value)
13301 {
13302   __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value);
13303 }
13304 
13305 __extension__ extern __inline void
13306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13307 __arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value)
13308 {
13309   __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value);
13310 }
13311 
13312 __extension__ extern __inline void
13313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13314 __arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
13315 {
13316   __builtin_mve_vstrdq_scatter_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
13317 }
13318 
13319 __extension__ extern __inline void
13320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13321 __arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
13322 {
13323   __builtin_mve_vstrdq_scatter_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
13324 }
13325 
13326 __extension__ extern __inline void
13327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13328 __arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
13329 {
13330   __builtin_mve_vstrdq_scatter_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
13331 }
13332 
13333 __extension__ extern __inline void
13334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13335 __arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
13336 {
13337   __builtin_mve_vstrdq_scatter_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
13338 }
13339 
13340 __extension__ extern __inline void
13341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13342 __arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
13343 {
13344   __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
13345 }
13346 
13347 __extension__ extern __inline void
13348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13349 __arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
13350 {
13351   __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
13352 }
13353 
13354 __extension__ extern __inline void
13355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13356 __arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
13357 {
13358   __builtin_mve_vstrdq_scatter_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
13359 }
13360 
13361 __extension__ extern __inline void
13362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13363 __arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
13364 {
13365   __builtin_mve_vstrdq_scatter_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
13366 }
13367 
13368 __extension__ extern __inline void
13369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13370 __arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
13371 {
13372   __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
13373 }
13374 
13375 __extension__ extern __inline void
13376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13377 __arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
13378 {
13379   __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
13380 }
13381 
13382 __extension__ extern __inline void
13383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13384 __arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
13385 {
13386   __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
13387 }
13388 
13389 __extension__ extern __inline void
13390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13391 __arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
13392 {
13393   __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
13394 }
13395 
13396 __extension__ extern __inline void
13397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13398 __arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
13399 {
13400   __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
13401 }
13402 
13403 __extension__ extern __inline void
13404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13405 __arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
13406 {
13407   __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
13408 }
13409 
13410 __extension__ extern __inline void
13411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13412 __arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
13413 {
13414   __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
13415 }
13416 
13417 __extension__ extern __inline void
13418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13419 __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
13420 {
13421   __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
13422 }
13423 
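/* The __arm_vaddq_* overloads below do not go through a named builtin:
   they use GCC's generic vector arithmetic (lane-wise `__a + __b` on the
   MVE vector types), which the backend is expected to match to an MVE
   VADD instruction.  A minimal, illustrative use (the operand names are
   hypothetical; user code would normally call this through the vaddq
   macros this header defines when __ARM_MVE_PRESERVE_USER_NAMESPACE is
   not set):

     int32x4_t sum = __arm_vaddq_s32 (lhs, rhs);   // lane-wise 32-bit add
*/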
13424 __extension__ extern __inline int8x16_t
13425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13426 __arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
13427 {
13428   return __a + __b;
13429 }
13430 
13431 __extension__ extern __inline int16x8_t
13432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13433 __arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
13434 {
13435   return __a + __b;
13436 }
13437 
13438 __extension__ extern __inline int32x4_t
13439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13440 __arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
13441 {
13442   return __a + __b;
13443 }
13444 
13445 __extension__ extern __inline uint8x16_t
13446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13447 __arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
13448 {
13449   return __a + __b;
13450 }
13451 
13452 __extension__ extern __inline uint16x8_t
13453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13454 __arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
13455 {
13456   return __a + __b;
13457 }
13458 
13459 __extension__ extern __inline uint32x4_t
13460 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13461 __arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
13462 {
13463   return __a + __b;
13464 }
13465 
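/* VDDUP/VIDUP family: the intrinsics below build a vector of values that
   start at __a (or at *__a for the _wb forms) and step down (vddupq) or
   up (vidupq) by __imm per lane; the instruction encoding restricts __imm
   to 1, 2, 4 or 8.  The _m forms take an __inactive vector and a
   predicate, and the _wb forms also advance the scalar start through the
   pointer by __imm times the lane count, as the `*__a -= __imm * 16u;`
   style updates below show.  A small illustrative sketch (variable names
   are hypothetical):

     uint32_t start = 64;
     uint8x16_t v = __arm_vddupq_n_u8 (start, 2);   // lanes 64, 62, ..., 34
     uint8x16_t w = __arm_vddupq_wb_u8 (&start, 2); // same lanes; start -> 32
*/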
13466 __extension__ extern __inline uint8x16_t
13467 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13468 __arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13469 {
13470   return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p);
13471 }
13472 
13473 __extension__ extern __inline uint32x4_t
13474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13475 __arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13476 {
13477   return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p);
13478 }
13479 
13480 __extension__ extern __inline uint16x8_t
13481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13482 __arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13483 {
13484   return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p);
13485 }
13486 
13487 __extension__ extern __inline uint8x16_t
13488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13489 __arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13490 {
13491   uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, * __a, __imm, __p);
13492   *__a -= __imm * 16u;
13493   return __res;
13494 }
13495 
13496 __extension__ extern __inline uint16x8_t
13497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13498 __arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13499 {
13500   uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
13501   *__a -= __imm * 8u;
13502   return __res;
13503 }
13504 
13505 __extension__ extern __inline uint32x4_t
13506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13507 __arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13508 {
13509   uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p);
13510   *__a -= __imm * 4u;
13511   return __res;
13512 }
13513 
13514 __extension__ extern __inline uint8x16_t
13515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13516 __arm_vddupq_n_u8 (uint32_t __a, const int __imm)
13517 {
13518   return __builtin_mve_vddupq_n_uv16qi (__a, __imm);
13519 }
13520 
13521 __extension__ extern __inline uint32x4_t
13522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13523 __arm_vddupq_n_u32 (uint32_t __a, const int __imm)
13524 {
13525   return __builtin_mve_vddupq_n_uv4si (__a, __imm);
13526 }
13527 
13528 __extension__ extern __inline uint16x8_t
13529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13530 __arm_vddupq_n_u16 (uint32_t __a, const int __imm)
13531 {
13532   return __builtin_mve_vddupq_n_uv8hi (__a, __imm);
13533 }
13534 
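/* VDWDUP/VIWDUP (wrapping) variants: the extra __b operand is the wrap
   bound.  The wrappers below hand it to the builtins packed into the top
   half of a 64-bit value (`((uint64_t) __b) << 32`), which is how these
   builtins expect the wrap bound to be passed alongside the start value;
   otherwise the _m, _n and _wb forms follow the same pattern as the
   non-wrapping intrinsics above.  Illustrative call (operands are
   hypothetical):

     uint8x16_t v = __arm_vdwdupq_n_u8 (start, wrap, 4);
*/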
13535 __extension__ extern __inline uint8x16_t
13536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13537 __arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13538 {
13539   uint64_t __c = ((uint64_t) __b) << 32;
13540   return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
13541 }
13542 
13543 __extension__ extern __inline uint32x4_t
13544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13545 __arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13546 {
13547   uint64_t __c = ((uint64_t) __b) << 32;
13548   return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
13549 }
13550 
13551 __extension__ extern __inline uint16x8_t
13552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13553 __arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13554 {
13555   uint64_t __c = ((uint64_t) __b) << 32;
13556   return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
13557 }
13558 
13559 __extension__ extern __inline uint8x16_t
13560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13561 __arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13562 {
13563   uint64_t __c = ((uint64_t) __b) << 32;
13564   uint8x16_t __res =  __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
13565   *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
13566   return __res;
13567 }
13568 
13569 __extension__ extern __inline uint32x4_t
13570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13571 __arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13572 {
13573   uint64_t __c = ((uint64_t) __b) << 32;
13574   uint32x4_t __res =  __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
13575   *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
13576   return __res;
13577 }
13578 
13579 __extension__ extern __inline uint16x8_t
13580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13581 __arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13582 {
13583   uint64_t __c = ((uint64_t) __b) << 32;
13584   uint16x8_t __res =  __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
13585   *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
13586   return __res;
13587 }
13588 
13589 __extension__ extern __inline uint8x16_t
13590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13591 __arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
13592 {
13593   uint64_t __c = ((uint64_t) __b) << 32;
13594   return __builtin_mve_vdwdupq_n_uv16qi (__a, __c, __imm);
13595 }
13596 
13597 __extension__ extern __inline uint32x4_t
13598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13599 __arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
13600 {
13601   uint64_t __c = ((uint64_t) __b) << 32;
13602   return __builtin_mve_vdwdupq_n_uv4si (__a, __c, __imm);
13603 }
13604 
13605 __extension__ extern __inline uint16x8_t
13606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13607 __arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
13608 {
13609   uint64_t __c = ((uint64_t) __b) << 32;
13610   return __builtin_mve_vdwdupq_n_uv8hi (__a, __c, __imm);
13611 }
13612 
13613 __extension__ extern __inline uint8x16_t
13614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13615 __arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
13616 {
13617   uint64_t __c = ((uint64_t) __b) << 32;
13618   uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __c, __imm);
13619   *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __c, __imm);
13620   return __res;
13621 }
13622 
13623 __extension__ extern __inline uint32x4_t
13624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13625 __arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
13626 {
13627   uint64_t __c = ((uint64_t) __b) << 32;
13628   uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __c, __imm);
13629   *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __c, __imm);
13630   return __res;
13631 }
13632 
13633 __extension__ extern __inline uint16x8_t
13634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13635 __arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
13636 {
13637   uint64_t __c = ((uint64_t) __b) << 32;
13638   uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __c, __imm);
13639   *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __c, __imm);
13640   return __res;
13641 }
13642 
13643 __extension__ extern __inline uint8x16_t
13644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13645 __arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13646 {
13647   return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p);
13648 }
13649 
13650 __extension__ extern __inline uint32x4_t
13651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13652 __arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13653 {
13654   return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p);
13655 }
13656 
13657 __extension__ extern __inline uint16x8_t
13658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13659 __arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
13660 {
13661   return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p);
13662 }
13663 
13664 __extension__ extern __inline uint8x16_t
13665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13666 __arm_vidupq_n_u8 (uint32_t __a, const int __imm)
13667 {
13668   return __builtin_mve_vidupq_n_uv16qi (__a, __imm);
13669 }
13670 
13671 __extension__ extern __inline uint8x16_t
13672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13673 __arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13674 {
13675   uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p);
13676   *__a += __imm * 16u;
13677   return __res;
13678 }
13679 
13680 __extension__ extern __inline uint16x8_t
13681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13682 __arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13683 {
13684   uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
13685   *__a += __imm * 8u;
13686   return __res;
13687 }
13688 
13689 __extension__ extern __inline uint32x4_t
13690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13691 __arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
13692 {
13693   uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p);
13694   *__a += __imm * 4u;
13695   return __res;
13696 }
13697 
13698 __extension__ extern __inline uint32x4_t
13699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13700 __arm_vidupq_n_u32 (uint32_t __a, const int __imm)
13701 {
13702   return __builtin_mve_vidupq_n_uv4si (__a, __imm);
13703 }
13704 
13705 __extension__ extern __inline uint16x8_t
13706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13707 __arm_vidupq_n_u16 (uint32_t __a, const int __imm)
13708 {
13709   return __builtin_mve_vidupq_n_uv8hi (__a, __imm);
13710 }
13711 
13712 __extension__ extern __inline uint8x16_t
13713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13714 __arm_vidupq_wb_u8 (uint32_t * __a, const int __imm)
13715 {
13716   uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm);
13717   *__a += __imm * 16u;
13718   return __res;
13719 }
13720 
13721 __extension__ extern __inline uint16x8_t
13722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13723 __arm_vidupq_wb_u16 (uint32_t * __a, const int __imm)
13724 {
13725   uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm);
13726   *__a += __imm * 8u;
13727   return __res;
13728 }
13729 
13730 __extension__ extern __inline uint32x4_t
13731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13732 __arm_vidupq_wb_u32 (uint32_t * __a, const int __imm)
13733 {
13734   uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm);
13735   *__a += __imm * 4u;
13736   return __res;
13737 }
13738 
13739 __extension__ extern __inline uint8x16_t
13740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13741 __arm_vddupq_wb_u8 (uint32_t * __a, const int __imm)
13742 {
13743   uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm);
13744   *__a -= __imm * 16u;
13745   return __res;
13746 }
13747 
13748 __extension__ extern __inline uint16x8_t
13749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13750 __arm_vddupq_wb_u16 (uint32_t * __a, const int __imm)
13751 {
13752   uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm);
13753   *__a -= __imm * 8u;
13754   return __res;
13755 }
13756 
13757 __extension__ extern __inline uint32x4_t
13758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13759 __arm_vddupq_wb_u32 (uint32_t * __a, const int __imm)
13760 {
13761   uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm);
13762   *__a -= __imm * 4u;
13763   return __res;
13764 }
13765 
13766 __extension__ extern __inline uint8x16_t
13767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13768 __arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13769 {
13770   uint64_t __c = ((uint64_t) __b) << 32;
13771   return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
13772 }
13773 
13774 __extension__ extern __inline uint32x4_t
13775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13776 __arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13777 {
13778   uint64_t __c = ((uint64_t) __b) << 32;
13779   return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
13780 }
13781 
13782 __extension__ extern __inline uint16x8_t
13783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13784 __arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13785 {
13786   uint64_t __c = ((uint64_t) __b) << 32;
13787   return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
13788 }
13789 
13790 __extension__ extern __inline uint8x16_t
13791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13792 __arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13793 {
13794   uint64_t __c = ((uint64_t) __b) << 32;
13795   uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
13796   *__a =  __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
13797   return __res;
13798 }
13799 
13800 __extension__ extern __inline uint32x4_t
13801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13802 __arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13803 {
13804   uint64_t __c = ((uint64_t) __b) << 32;
13805   uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
13806   *__a =  __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
13807   return __res;
13808 }
13809 
13810 __extension__ extern __inline uint16x8_t
13811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13812 __arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
13813 {
13814   uint64_t __c = ((uint64_t) __b) << 32;
13815   uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
13816   *__a =  __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
13817   return __res;
13818 }
13819 
13820 __extension__ extern __inline uint8x16_t
13821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13822 __arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
13823 {
13824   uint64_t __c = ((uint64_t) __b) << 32;
13825   return __builtin_mve_viwdupq_n_uv16qi (__a, __c, __imm);
13826 }
13827 
13828 __extension__ extern __inline uint32x4_t
13829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13830 __arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
13831 {
13832   uint64_t __c = ((uint64_t) __b) << 32;
13833   return __builtin_mve_viwdupq_n_uv4si (__a, __c, __imm);
13834 }
13835 
13836 __extension__ extern __inline uint16x8_t
13837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13838 __arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
13839 {
13840   uint64_t __c = ((uint64_t) __b) << 32;
13841   return __builtin_mve_viwdupq_n_uv8hi (__a, __c, __imm);
13842 }
13843 
13844 __extension__ extern __inline uint8x16_t
13845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13846 __arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
13847 {
13848   uint64_t __c = ((uint64_t) __b) << 32;
13849   uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __c, __imm);
13850   *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __c, __imm);
13851   return __res;
13852 }
13853 
13854 __extension__ extern __inline uint32x4_t
13855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13856 __arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
13857 {
13858   uint64_t __c = ((uint64_t) __b) << 32;
13859   uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __c, __imm);
13860   *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __c, __imm);
13861   return __res;
13862 }
13863 
13864 __extension__ extern __inline uint16x8_t
13865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13866 __arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
13867 {
13868   uint64_t __c = ((uint64_t) __b) << 32;
13869   uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __c, __imm);
13870   *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __c, __imm);
13871   return __res;
13872 }
13873 
13874 
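/* Gather loads with base writeback: each wrapper below issues two builtins
   over the same operands.  The "nowb" builtin yields the gathered data
   computed from the original vector of base addresses, and the "wb"
   builtin yields the updated base vector, which is written back through
   __addr, so the two results of the writeback form are modelled
   separately.  Illustrative sketch (the `bases` variable and its
   initialisation are hypothetical):

     uint64x2_t bases = ... ;   // per-lane byte addresses
     int64x2_t d = __arm_vldrdq_gather_base_wb_s64 (&bases, 8);
     // d holds the gathered doublewords; each lane of bases is now +8.
*/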
13875 __extension__ extern __inline int64x2_t
13876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13877 __arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset)
13878 {
13879   int64x2_t
13880   result = __builtin_mve_vldrdq_gather_base_nowb_sv2di (*__addr, __offset);
13881   *__addr = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset);
13882   return result;
13883 }
13884 
13885 __extension__ extern __inline uint64x2_t
13886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13887 __arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset)
13888 {
13889   uint64x2_t
13890   result = __builtin_mve_vldrdq_gather_base_nowb_uv2di (*__addr, __offset);
13891   *__addr = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset);
13892   return result;
13893 }
13894 
13895 __extension__ extern __inline int64x2_t
13896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13897 __arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
13898 {
13899   int64x2_t
13900   result = __builtin_mve_vldrdq_gather_base_nowb_z_sv2di (*__addr, __offset, __p);
13901   *__addr = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p);
13902   return result;
13903 }
13904 
13905 __extension__ extern __inline uint64x2_t
13906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13907 __arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
13908 {
13909   uint64x2_t
13910   result = __builtin_mve_vldrdq_gather_base_nowb_z_uv2di (*__addr, __offset, __p);
13911   *__addr = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p);
13912   return result;
13913 }
13914 
13915 __extension__ extern __inline int32x4_t
13916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13917 __arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset)
13918 {
13919   int32x4_t
13920   result = __builtin_mve_vldrwq_gather_base_nowb_sv4si (*__addr, __offset);
13921   *__addr = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset);
13922   return result;
13923 }
13924 
13925 __extension__ extern __inline uint32x4_t
13926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13927 __arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset)
13928 {
13929   uint32x4_t
13930   result = __builtin_mve_vldrwq_gather_base_nowb_uv4si (*__addr, __offset);
13931   *__addr = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset);
13932   return result;
13933 }
13934 
13935 __extension__ extern __inline int32x4_t
13936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13937 __arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
13938 {
13939   int32x4_t
13940   result = __builtin_mve_vldrwq_gather_base_nowb_z_sv4si (*__addr, __offset, __p);
13941   *__addr = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p);
13942   return result;
13943 }
13944 
13945 __extension__ extern __inline uint32x4_t
13946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13947 __arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
13948 {
13949   uint32x4_t
13950   result = __builtin_mve_vldrwq_gather_base_nowb_z_uv4si (*__addr, __offset, __p);
13951   *__addr = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p);
13952   return result;
13953 }
13954 
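/* Scatter stores with base writeback: the builtins used below return the
   updated vector of base addresses, and each wrapper stores that result
   back through __addr.  Illustrative sketch (the `bases` and `vals`
   variables are hypothetical):

     __arm_vstrwq_scatter_base_wb_s32 (&bases, 4, vals);
     // each lane of vals is stored at (bases[i] + 4); bases is updated
     // to those addresses.
*/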
13955 __extension__ extern __inline void
13956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13957 __arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value)
13958 {
13959   *__addr = __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value);
13960 }
13961 
13962 __extension__ extern __inline void
13963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13964 __arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
13965 {
13966   *__addr = __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value);
13967 }
13968 
13969 __extension__ extern __inline void
13970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13971 __arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
13972 {
13973   *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p);
13974 }
13975 
13976 __extension__ extern __inline void
13977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13978 __arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
13979 {
13980   *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p);
13981 }
13982 
13983 __extension__ extern __inline void
13984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13985 __arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
13986 {
13987   *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p);
13988 }
13989 
13990 __extension__ extern __inline void
13991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13992 __arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
13993 {
13994   *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p);
13995 }
13996 
13997 __extension__ extern __inline void
13998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13999 __arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value)
14000 {
14001   *__addr = __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value);
14002 }
14003 
14004 __extension__ extern __inline void
14005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14006 __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
14007 {
14008   *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
14009 }
14010 
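/* _x ("don't care") predicated variants: the forms that follow reuse the
   _m builtins but pass __arm_vuninitializedq_*() as the inactive-lane
   value, since lanes whose predicate bit is clear are left undefined
   under _x predication.  The x_wb forms update the start value through
   the pointer exactly like the plain wb forms above.  Illustrative
   sketch (the predicate mask shown is an arbitrary example):

     mve_pred16_t p = 0x00ff;                        // lanes 0-7 active
     uint8x16_t v = __arm_vddupq_x_n_u8 (32, 1, p);  // 32,31,...,25 in lanes
                                                     // 0-7; rest undefined
*/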
14011 __extension__ extern __inline uint8x16_t
14012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14013 __arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
14014 {
14015   return __builtin_mve_vddupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
14016 }
14017 
14018 __extension__ extern __inline uint16x8_t
14019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14020 __arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
14021 {
14022   return __builtin_mve_vddupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
14023 }
14024 
14025 __extension__ extern __inline uint32x4_t
14026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14027 __arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
14028 {
14029   return __builtin_mve_vddupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
14030 }
14031 
14032 __extension__ extern __inline uint8x16_t
14033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14034 __arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14035 {
14036   uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
14037   uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p);
14038   *__a -= __imm * 16u;
14039   return __res;
14040 }
14041 
14042 __extension__ extern __inline uint16x8_t
14043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14044 __arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14045 {
14046   uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
14047   uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
14048   *__a -= __imm * 8u;
14049   return __res;
14050 }
14051 
14052 __extension__ extern __inline uint32x4_t
14053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14054 __arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14055 {
14056   uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
14057   uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p);
14058   *__a -= __imm * 4u;
14059   return __res;
14060 }
14061 
14062 __extension__ extern __inline uint8x16_t
14063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14064 __arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14065 {
14066   uint64_t __c = ((uint64_t) __b) << 32;
14067   return __builtin_mve_vdwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
14068 }
14069 
14070 __extension__ extern __inline uint16x8_t
14071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14072 __arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14073 {
14074   uint64_t __c = ((uint64_t) __b) << 32;
14075   return __builtin_mve_vdwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
14076 }
14077 
14078 __extension__ extern __inline uint32x4_t
14079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14080 __arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14081 {
14082   uint64_t __c = ((uint64_t) __b) << 32;
14083   return __builtin_mve_vdwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
14084 }
14085 
14086 __extension__ extern __inline uint8x16_t
14087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14088 __arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14089 {
14090   uint64_t __c = ((uint64_t) __b) << 32;
14091   uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
14092   uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
14093   *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
14094   return __res;
14095 }
14096 
14097 __extension__ extern __inline uint16x8_t
14098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14099 __arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14100 {
14101   uint64_t __c = ((uint64_t) __b) << 32;
14102   uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
14103   uint16x8_t __res =  __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
14104   *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
14105   return __res;
14106 }
14107 
14108 __extension__ extern __inline uint32x4_t
14109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14110 __arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14111 {
14112   uint64_t __c = ((uint64_t) __b) << 32;
14113   uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
14114   uint32x4_t __res =  __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
14115   *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
14116   return __res;
14117 }
14118 
14119 __extension__ extern __inline uint8x16_t
14120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14121 __arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
14122 {
14123   return __builtin_mve_vidupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
14124 }
14125 
14126 __extension__ extern __inline uint16x8_t
14127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14128 __arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
14129 {
14130   return __builtin_mve_vidupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
14131 }
14132 
14133 __extension__ extern __inline uint32x4_t
14134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14135 __arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
14136 {
14137   return __builtin_mve_vidupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
14138 }
14139 
14140 __extension__ extern __inline uint8x16_t
14141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14142 __arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14143 {
14144   uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
14145   uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p);
14146   *__a += __imm * 16u;
14147   return __res;
14148 }
14149 
14150 __extension__ extern __inline uint16x8_t
14151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14152 __arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14153 {
14154   uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
14155   uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
14156   *__a += __imm * 8u;
14157   return __res;
14158 }
14159 
14160 __extension__ extern __inline uint32x4_t
14161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14162 __arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
14163 {
14164   uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
14165   uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p);
14166   *__a += __imm * 4u;
14167   return __res;
14168 }
14169 
14170 __extension__ extern __inline uint8x16_t
14171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14172 __arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14173 {
14174   uint64_t __c = ((uint64_t) __b) << 32;
14175   return __builtin_mve_viwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
14176 }
14177 
14178 __extension__ extern __inline uint16x8_t
14179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14180 __arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14181 {
14182   uint64_t __c = ((uint64_t) __b) << 32;
14183   return __builtin_mve_viwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
14184 }
14185 
14186 __extension__ extern __inline uint32x4_t
14187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14188 __arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
14189 {
14190   uint64_t __c = ((uint64_t) __b) << 32;
14191   return __builtin_mve_viwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
14192 }
14193 
14194 __extension__ extern __inline uint8x16_t
14195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14196 __arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14197 {
14198   uint64_t __c = ((uint64_t) __b) << 32;
14199   uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
14200   uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
14201   *__a =  __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
14202   return __res;
14203 }
14204 
14205 __extension__ extern __inline uint16x8_t
14206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14207 __arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14208 {
14209   uint64_t __c = ((uint64_t) __b) << 32;
14210   uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
14211   uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
14212   *__a =  __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
14213   return __res;
14214 }
14215 
14216 __extension__ extern __inline uint32x4_t
14217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14218 __arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
14219 {
14220   uint64_t __c = ((uint64_t) __b) << 32;
14221   uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
14222   uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
14223   *__a =  __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
14224   return __res;
14225 }
14226 
14227 __extension__ extern __inline int8x16_t
14228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14229 __arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p)
14230 {
14231   return __builtin_mve_vdupq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
14232 }
14233 
14234 __extension__ extern __inline int16x8_t
14235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14236 __arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p)
14237 {
14238   return __builtin_mve_vdupq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
14239 }
14240 
14241 __extension__ extern __inline int32x4_t
14242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14243 __arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p)
14244 {
14245   return __builtin_mve_vdupq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
14246 }
14247 
14248 __extension__ extern __inline uint8x16_t
14249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14250 __arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p)
14251 {
14252   return __builtin_mve_vdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
14253 }
14254 
14255 __extension__ extern __inline uint16x8_t
14256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14257 __arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p)
14258 {
14259   return __builtin_mve_vdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
14260 }
14261 
14262 __extension__ extern __inline uint32x4_t
14263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14264 __arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p)
14265 {
14266   return __builtin_mve_vdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
14267 }
14268 
14269 __extension__ extern __inline int8x16_t
14270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14271 __arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14272 {
14273   return __builtin_mve_vminq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14274 }
14275 
14276 __extension__ extern __inline int16x8_t
14277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14278 __arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14279 {
14280   return __builtin_mve_vminq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14281 }
14282 
14283 __extension__ extern __inline int32x4_t
14284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14285 __arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14286 {
14287   return __builtin_mve_vminq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14288 }
14289 
14290 __extension__ extern __inline uint8x16_t
14291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14292 __arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14293 {
14294   return __builtin_mve_vminq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14295 }
14296 
14297 __extension__ extern __inline uint16x8_t
14298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14299 __arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14300 {
14301   return __builtin_mve_vminq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14302 }
14303 
14304 __extension__ extern __inline uint32x4_t
14305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14306 __arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14307 {
14308   return __builtin_mve_vminq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14309 }
14310 
14311 __extension__ extern __inline int8x16_t
14312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14313 __arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14314 {
14315   return __builtin_mve_vmaxq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14316 }
14317 
14318 __extension__ extern __inline int16x8_t
14319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14320 __arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14321 {
14322   return __builtin_mve_vmaxq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14323 }
14324 
14325 __extension__ extern __inline int32x4_t
14326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14327 __arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14328 {
14329   return __builtin_mve_vmaxq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14330 }
14331 
14332 __extension__ extern __inline uint8x16_t
14333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14334 __arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14335 {
14336   return __builtin_mve_vmaxq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14337 }
14338 
14339 __extension__ extern __inline uint16x8_t
14340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14341 __arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14342 {
14343   return __builtin_mve_vmaxq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14344 }
14345 
14346 __extension__ extern __inline uint32x4_t
14347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14348 __arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14349 {
14350   return __builtin_mve_vmaxq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14351 }
14352 
14353 __extension__ extern __inline int8x16_t
14354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14355 __arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14356 {
14357   return __builtin_mve_vabdq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14358 }
14359 
14360 __extension__ extern __inline int16x8_t
14361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14362 __arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14363 {
14364   return __builtin_mve_vabdq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14365 }
14366 
14367 __extension__ extern __inline int32x4_t
14368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14369 __arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14370 {
14371   return __builtin_mve_vabdq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14372 }
14373 
14374 __extension__ extern __inline uint8x16_t
14375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14376 __arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14377 {
14378   return __builtin_mve_vabdq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14379 }
14380 
14381 __extension__ extern __inline uint16x8_t
14382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14383 __arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14384 {
14385   return __builtin_mve_vabdq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14386 }
14387 
14388 __extension__ extern __inline uint32x4_t
14389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14390 __arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14391 {
14392   return __builtin_mve_vabdq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14393 }
14394 
14395 __extension__ extern __inline int8x16_t
14396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14397 __arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
14398 {
14399   return __builtin_mve_vabsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
14400 }
14401 
14402 __extension__ extern __inline int16x8_t
14403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14404 __arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
14405 {
14406   return __builtin_mve_vabsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
14407 }
14408 
14409 __extension__ extern __inline int32x4_t
14410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14411 __arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
14412 {
14413   return __builtin_mve_vabsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
14414 }
14415 
14416 __extension__ extern __inline int8x16_t
14417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14418 __arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14419 {
14420   return __builtin_mve_vaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14421 }
14422 
14423 __extension__ extern __inline int16x8_t
14424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14425 __arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14426 {
14427   return __builtin_mve_vaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14428 }
14429 
14430 __extension__ extern __inline int32x4_t
14431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14432 __arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14433 {
14434   return __builtin_mve_vaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14435 }
14436 
14437 __extension__ extern __inline int8x16_t
14438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14439 __arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
14440 {
14441   return __builtin_mve_vaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14442 }
14443 
14444 __extension__ extern __inline int16x8_t
14445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14446 __arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
14447 {
14448   return __builtin_mve_vaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14449 }
14450 
14451 __extension__ extern __inline int32x4_t
14452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14453 __arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
14454 {
14455   return __builtin_mve_vaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14456 }
14457 
14458 __extension__ extern __inline uint8x16_t
14459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14460 __arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14461 {
14462   return __builtin_mve_vaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14463 }
14464 
14465 __extension__ extern __inline uint16x8_t
14466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14467 __arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14468 {
14469   return __builtin_mve_vaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14470 }
14471 
14472 __extension__ extern __inline uint32x4_t
14473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14474 __arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14475 {
14476   return __builtin_mve_vaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14477 }
14478 
14479 __extension__ extern __inline uint8x16_t
14480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14481 __arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
14482 {
14483   return __builtin_mve_vaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14484 }
14485 
14486 __extension__ extern __inline uint16x8_t
14487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14488 __arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
14489 {
14490   return __builtin_mve_vaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14491 }
14492 
14493 __extension__ extern __inline uint32x4_t
14494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14495 __arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
14496 {
14497   return __builtin_mve_vaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14498 }
14499 
14500 __extension__ extern __inline int8x16_t
14501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14502 __arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
14503 {
14504   return __builtin_mve_vclsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
14505 }
14506 
14507 __extension__ extern __inline int16x8_t
14508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14509 __arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
14510 {
14511   return __builtin_mve_vclsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
14512 }
14513 
14514 __extension__ extern __inline int32x4_t
14515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14516 __arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
14517 {
14518   return __builtin_mve_vclsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
14519 }
14520 
14521 __extension__ extern __inline int8x16_t
14522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14523 __arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p)
14524 {
14525   return __builtin_mve_vclzq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
14526 }
14527 
14528 __extension__ extern __inline int16x8_t
14529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14530 __arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p)
14531 {
14532   return __builtin_mve_vclzq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
14533 }
14534 
14535 __extension__ extern __inline int32x4_t
14536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14537 __arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p)
14538 {
14539   return __builtin_mve_vclzq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
14540 }
14541 
14542 __extension__ extern __inline uint8x16_t
14543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14544 __arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
14545 {
14546   return __builtin_mve_vclzq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
14547 }
14548 
14549 __extension__ extern __inline uint16x8_t
14550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14551 __arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
14552 {
14553   return __builtin_mve_vclzq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
14554 }
14555 
14556 __extension__ extern __inline uint32x4_t
14557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14558 __arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
14559 {
14560   return __builtin_mve_vclzq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
14561 }
14562 
14563 __extension__ extern __inline int8x16_t
14564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14565 __arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p)
14566 {
14567   return __builtin_mve_vnegq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
14568 }
14569 
14570 __extension__ extern __inline int16x8_t
14571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14572 __arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
14573 {
14574   return __builtin_mve_vnegq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
14575 }
14576 
14577 __extension__ extern __inline int32x4_t
14578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14579 __arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
14580 {
14581   return __builtin_mve_vnegq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
14582 }
14583 
14584 __extension__ extern __inline int8x16_t
14585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14586 __arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14587 {
14588   return __builtin_mve_vmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14589 }
14590 
14591 __extension__ extern __inline int16x8_t
14592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14593 __arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14594 {
14595   return __builtin_mve_vmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14596 }
14597 
14598 __extension__ extern __inline int32x4_t
14599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14600 __arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14601 {
14602   return __builtin_mve_vmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14603 }
14604 
14605 __extension__ extern __inline uint8x16_t
14606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14607 __arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14608 {
14609   return __builtin_mve_vmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14610 }
14611 
14612 __extension__ extern __inline uint16x8_t
14613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14614 __arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14615 {
14616   return __builtin_mve_vmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14617 }
14618 
14619 __extension__ extern __inline uint32x4_t
14620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14621 __arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14622 {
14623   return __builtin_mve_vmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14624 }
14625 
14626 __extension__ extern __inline uint16x8_t
14627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14628 __arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14629 {
14630   return __builtin_mve_vmullbq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14631 }
14632 
14633 __extension__ extern __inline uint32x4_t
14634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14635 __arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14636 {
14637   return __builtin_mve_vmullbq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14638 }
14639 
14640 __extension__ extern __inline int16x8_t
14641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14642 __arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14643 {
14644   return __builtin_mve_vmullbq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14645 }
14646 
14647 __extension__ extern __inline int32x4_t
14648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14649 __arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14650 {
14651   return __builtin_mve_vmullbq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
14652 }
14653 
14654 __extension__ extern __inline int64x2_t
14655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14656 __arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14657 {
14658   return __builtin_mve_vmullbq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
14659 }
14660 
14661 __extension__ extern __inline uint16x8_t
14662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14663 __arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14664 {
14665   return __builtin_mve_vmullbq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14666 }
14667 
14668 __extension__ extern __inline uint32x4_t
14669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14670 __arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14671 {
14672   return __builtin_mve_vmullbq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14673 }
14674 
14675 __extension__ extern __inline uint64x2_t
14676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14677 __arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14678 {
14679   return __builtin_mve_vmullbq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
14680 }
14681 
14682 __extension__ extern __inline uint16x8_t
14683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14684 __arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14685 {
14686   return __builtin_mve_vmulltq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14687 }
14688 
14689 __extension__ extern __inline uint32x4_t
14690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14691 __arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14692 {
14693   return __builtin_mve_vmulltq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14694 }
14695 
14696 __extension__ extern __inline int16x8_t
14697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14698 __arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14699 {
14700   return __builtin_mve_vmulltq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14701 }
14702 
14703 __extension__ extern __inline int32x4_t
14704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14705 __arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14706 {
14707   return __builtin_mve_vmulltq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
14708 }
14709 
14710 __extension__ extern __inline int64x2_t
14711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14712 __arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14713 {
14714   return __builtin_mve_vmulltq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
14715 }
14716 
14717 __extension__ extern __inline uint16x8_t
14718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14719 __arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14720 {
14721   return __builtin_mve_vmulltq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14722 }
14723 
14724 __extension__ extern __inline uint32x4_t
14725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14726 __arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14727 {
14728   return __builtin_mve_vmulltq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14729 }
14730 
14731 __extension__ extern __inline uint64x2_t
14732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14733 __arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14734 {
14735   return __builtin_mve_vmulltq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
14736 }
14737 
14738 __extension__ extern __inline int8x16_t
14739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14740 __arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14741 {
14742   return __builtin_mve_vmulq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14743 }
14744 
14745 __extension__ extern __inline int16x8_t
14746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14747 __arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14748 {
14749   return __builtin_mve_vmulq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14750 }
14751 
14752 __extension__ extern __inline int32x4_t
14753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14754 __arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14755 {
14756   return __builtin_mve_vmulq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14757 }
14758 
14759 __extension__ extern __inline int8x16_t
14760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14761 __arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
14762 {
14763   return __builtin_mve_vmulq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14764 }
14765 
14766 __extension__ extern __inline int16x8_t
14767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14768 __arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
14769 {
14770   return __builtin_mve_vmulq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14771 }
14772 
14773 __extension__ extern __inline int32x4_t
14774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14775 __arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
14776 {
14777   return __builtin_mve_vmulq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14778 }
14779 
14780 __extension__ extern __inline uint8x16_t
14781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14782 __arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14783 {
14784   return __builtin_mve_vmulq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14785 }
14786 
14787 __extension__ extern __inline uint16x8_t
14788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14789 __arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14790 {
14791   return __builtin_mve_vmulq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14792 }
14793 
14794 __extension__ extern __inline uint32x4_t
14795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14796 __arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14797 {
14798   return __builtin_mve_vmulq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14799 }
14800 
14801 __extension__ extern __inline uint8x16_t
14802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14803 __arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
14804 {
14805   return __builtin_mve_vmulq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14806 }
14807 
14808 __extension__ extern __inline uint16x8_t
14809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14810 __arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
14811 {
14812   return __builtin_mve_vmulq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14813 }
14814 
14815 __extension__ extern __inline uint32x4_t
14816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14817 __arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
14818 {
14819   return __builtin_mve_vmulq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14820 }
14821 
14822 __extension__ extern __inline int8x16_t
14823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14824 __arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14825 {
14826   return __builtin_mve_vsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14827 }
14828 
14829 __extension__ extern __inline int16x8_t
14830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14831 __arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14832 {
14833   return __builtin_mve_vsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14834 }
14835 
14836 __extension__ extern __inline int32x4_t
14837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14838 __arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14839 {
14840   return __builtin_mve_vsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14841 }
14842 
14843 __extension__ extern __inline int8x16_t
14844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14845 __arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
14846 {
14847   return __builtin_mve_vsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14848 }
14849 
14850 __extension__ extern __inline int16x8_t
14851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14852 __arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
14853 {
14854   return __builtin_mve_vsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14855 }
14856 
14857 __extension__ extern __inline int32x4_t
14858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14859 __arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
14860 {
14861   return __builtin_mve_vsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14862 }
14863 
14864 __extension__ extern __inline uint8x16_t
14865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14866 __arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14867 {
14868   return __builtin_mve_vsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14869 }
14870 
14871 __extension__ extern __inline uint16x8_t
14872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14873 __arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14874 {
14875   return __builtin_mve_vsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14876 }
14877 
14878 __extension__ extern __inline uint32x4_t
14879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14880 __arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14881 {
14882   return __builtin_mve_vsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14883 }
14884 
14885 __extension__ extern __inline uint8x16_t
14886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14887 __arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
14888 {
14889   return __builtin_mve_vsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14890 }
14891 
14892 __extension__ extern __inline uint16x8_t
14893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14894 __arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
14895 {
14896   return __builtin_mve_vsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14897 }
14898 
14899 __extension__ extern __inline uint32x4_t
14900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14901 __arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
14902 {
14903   return __builtin_mve_vsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14904 }
14905 
14906 __extension__ extern __inline int8x16_t
14907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14908 __arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14909 {
14910   return __builtin_mve_vcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14911 }
14912 
14913 __extension__ extern __inline int16x8_t
14914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14915 __arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14916 {
14917   return __builtin_mve_vcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14918 }
14919 
14920 __extension__ extern __inline int32x4_t
14921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14922 __arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14923 {
14924   return __builtin_mve_vcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14925 }
14926 
14927 __extension__ extern __inline uint8x16_t
14928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14929 __arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14930 {
14931   return __builtin_mve_vcaddq_rot90_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14932 }
14933 
14934 __extension__ extern __inline uint16x8_t
14935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14936 __arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14937 {
14938   return __builtin_mve_vcaddq_rot90_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14939 }
14940 
14941 __extension__ extern __inline uint32x4_t
14942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14943 __arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14944 {
14945   return __builtin_mve_vcaddq_rot90_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14946 }
14947 
14948 __extension__ extern __inline int8x16_t
14949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14950 __arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14951 {
14952   return __builtin_mve_vcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14953 }
14954 
14955 __extension__ extern __inline int16x8_t
14956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14957 __arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14958 {
14959   return __builtin_mve_vcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14960 }
14961 
14962 __extension__ extern __inline int32x4_t
14963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14964 __arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14965 {
14966   return __builtin_mve_vcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
14967 }
14968 
14969 __extension__ extern __inline uint8x16_t
14970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14971 __arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14972 {
14973   return __builtin_mve_vcaddq_rot270_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
14974 }
14975 
14976 __extension__ extern __inline uint16x8_t
14977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14978 __arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14979 {
14980   return __builtin_mve_vcaddq_rot270_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14981 }
14982 
14983 __extension__ extern __inline uint32x4_t
14984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14985 __arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14986 {
14987   return __builtin_mve_vcaddq_rot270_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
14988 }
14989 
14990 __extension__ extern __inline int8x16_t
14991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14992 __arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
14993 {
14994   return __builtin_mve_vhaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
14995 }
14996 
14997 __extension__ extern __inline int16x8_t
14998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14999 __arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
15000 {
15001   return __builtin_mve_vhaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15002 }
15003 
15004 __extension__ extern __inline int32x4_t
15005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15006 __arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
15007 {
15008   return __builtin_mve_vhaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15009 }
15010 
15011 __extension__ extern __inline uint8x16_t
15012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15013 __arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
15014 {
15015   return __builtin_mve_vhaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15016 }
15017 
15018 __extension__ extern __inline uint16x8_t
15019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15020 __arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
15021 {
15022   return __builtin_mve_vhaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15023 }
15024 
15025 __extension__ extern __inline uint32x4_t
15026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15027 __arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
15028 {
15029   return __builtin_mve_vhaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15030 }
15031 
15032 __extension__ extern __inline int8x16_t
15033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15034 __arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15035 {
15036   return __builtin_mve_vhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15037 }
15038 
15039 __extension__ extern __inline int16x8_t
15040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15041 __arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15042 {
15043   return __builtin_mve_vhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15044 }
15045 
15046 __extension__ extern __inline int32x4_t
15047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15048 __arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15049 {
15050   return __builtin_mve_vhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15051 }
15052 
15053 __extension__ extern __inline uint8x16_t
15054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15055 __arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15056 {
15057   return __builtin_mve_vhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15058 }
15059 
15060 __extension__ extern __inline uint16x8_t
15061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15062 __arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15063 {
15064   return __builtin_mve_vhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15065 }
15066 
15067 __extension__ extern __inline uint32x4_t
15068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15069 __arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15070 {
15071   return __builtin_mve_vhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15072 }
15073 
15074 __extension__ extern __inline int8x16_t
15075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15076 __arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15077 {
15078   return __builtin_mve_vhcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15079 }
15080 
15081 __extension__ extern __inline int16x8_t
15082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15083 __arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15084 {
15085   return __builtin_mve_vhcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15086 }
15087 
15088 __extension__ extern __inline int32x4_t
15089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15090 __arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15091 {
15092   return __builtin_mve_vhcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15093 }
15094 
15095 __extension__ extern __inline int8x16_t
15096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15097 __arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15098 {
15099   return __builtin_mve_vhcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15100 }
15101 
15102 __extension__ extern __inline int16x8_t
15103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15104 __arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15105 {
15106   return __builtin_mve_vhcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15107 }
15108 
15109 __extension__ extern __inline int32x4_t
15110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15111 __arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15112 {
15113   return __builtin_mve_vhcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15114 }
15115 
15116 __extension__ extern __inline int8x16_t
15117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15118 __arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
15119 {
15120   return __builtin_mve_vhsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15121 }
15122 
15123 __extension__ extern __inline int16x8_t
15124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15125 __arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
15126 {
15127   return __builtin_mve_vhsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15128 }
15129 
15130 __extension__ extern __inline int32x4_t
15131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15132 __arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
15133 {
15134   return __builtin_mve_vhsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15135 }
15136 
15137 __extension__ extern __inline uint8x16_t
15138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15139 __arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
15140 {
15141   return __builtin_mve_vhsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15142 }
15143 
15144 __extension__ extern __inline uint16x8_t
15145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15146 __arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
15147 {
15148   return __builtin_mve_vhsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15149 }
15150 
15151 __extension__ extern __inline uint32x4_t
15152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15153 __arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
15154 {
15155   return __builtin_mve_vhsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15156 }
15157 
15158 __extension__ extern __inline int8x16_t
15159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15160 __arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15161 {
15162   return __builtin_mve_vhsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15163 }
15164 
15165 __extension__ extern __inline int16x8_t
15166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15167 __arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15168 {
15169   return __builtin_mve_vhsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15170 }
15171 
15172 __extension__ extern __inline int32x4_t
15173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15174 __arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15175 {
15176   return __builtin_mve_vhsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15177 }
15178 
15179 __extension__ extern __inline uint8x16_t
15180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15181 __arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15182 {
15183   return __builtin_mve_vhsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15184 }
15185 
15186 __extension__ extern __inline uint16x8_t
15187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15188 __arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15189 {
15190   return __builtin_mve_vhsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15191 }
15192 
15193 __extension__ extern __inline uint32x4_t
15194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15195 __arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15196 {
15197   return __builtin_mve_vhsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15198 }
15199 
15200 __extension__ extern __inline int8x16_t
15201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15202 __arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15203 {
15204   return __builtin_mve_vrhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15205 }
15206 
15207 __extension__ extern __inline int16x8_t
15208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15209 __arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15210 {
15211   return __builtin_mve_vrhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15212 }
15213 
15214 __extension__ extern __inline int32x4_t
15215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15216 __arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15217 {
15218   return __builtin_mve_vrhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15219 }
15220 
15221 __extension__ extern __inline uint8x16_t
15222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15223 __arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15224 {
15225   return __builtin_mve_vrhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15226 }
15227 
15228 __extension__ extern __inline uint16x8_t
15229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15230 __arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15231 {
15232   return __builtin_mve_vrhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15233 }
15234 
15235 __extension__ extern __inline uint32x4_t
15236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15237 __arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15238 {
15239   return __builtin_mve_vrhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15240 }
15241 
15242 __extension__ extern __inline int8x16_t
15243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15244 __arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15245 {
15246   return __builtin_mve_vrmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15247 }
15248 
15249 __extension__ extern __inline int16x8_t
15250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15251 __arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15252 {
15253   return __builtin_mve_vrmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15254 }
15255 
15256 __extension__ extern __inline int32x4_t
15257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15258 __arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15259 {
15260   return __builtin_mve_vrmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15261 }
15262 
15263 __extension__ extern __inline uint8x16_t
15264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15265 __arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15266 {
15267   return __builtin_mve_vrmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15268 }
15269 
15270 __extension__ extern __inline uint16x8_t
15271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15272 __arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15273 {
15274   return __builtin_mve_vrmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15275 }
15276 
15277 __extension__ extern __inline uint32x4_t
15278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15279 __arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15280 {
15281   return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15282 }
15283 
15284 __extension__ extern __inline int8x16_t
15285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15286 __arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15287 {
15288   return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15289 }
15290 
15291 __extension__ extern __inline int16x8_t
15292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15293 __arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15294 {
15295   return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15296 }
15297 
15298 __extension__ extern __inline int32x4_t
15299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15300 __arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15301 {
15302   return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15303 }
15304 
15305 __extension__ extern __inline uint8x16_t
15306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15307 __arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15308 {
15309   return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15310 }
15311 
15312 __extension__ extern __inline uint16x8_t
15313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15314 __arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15315 {
15316   return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15317 }
15318 
15319 __extension__ extern __inline uint32x4_t
15320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15321 __arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15322 {
15323   return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15324 }
15325 
15326 __extension__ extern __inline int8x16_t
15327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15328 __arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15329 {
15330   return __builtin_mve_vbicq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15331 }
15332 
15333 __extension__ extern __inline int16x8_t
15334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15335 __arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15336 {
15337   return __builtin_mve_vbicq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15338 }
15339 
15340 __extension__ extern __inline int32x4_t
15341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15342 __arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15343 {
15344   return __builtin_mve_vbicq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15345 }
15346 
15347 __extension__ extern __inline uint8x16_t
15348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15349 __arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15350 {
15351   return __builtin_mve_vbicq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15352 }
15353 
15354 __extension__ extern __inline uint16x8_t
15355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15356 __arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15357 {
15358   return __builtin_mve_vbicq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15359 }
15360 
15361 __extension__ extern __inline uint32x4_t
15362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15363 __arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15364 {
15365   return __builtin_mve_vbicq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15366 }
15367 
15368 __extension__ extern __inline int8x16_t
15369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15370 __arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
15371 {
15372   return __builtin_mve_vbrsrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15373 }
15374 
15375 __extension__ extern __inline int16x8_t
15376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15377 __arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
15378 {
15379   return __builtin_mve_vbrsrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15380 }
15381 
15382 __extension__ extern __inline int32x4_t
15383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15384 __arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
15385 {
15386   return __builtin_mve_vbrsrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15387 }
15388 
15389 __extension__ extern __inline uint8x16_t
15390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15391 __arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
15392 {
15393   return __builtin_mve_vbrsrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15394 }
15395 
15396 __extension__ extern __inline uint16x8_t
15397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15398 __arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
15399 {
15400   return __builtin_mve_vbrsrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15401 }
15402 
15403 __extension__ extern __inline uint32x4_t
15404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15405 __arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
15406 {
15407   return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15408 }
15409 
15410 __extension__ extern __inline int8x16_t
15411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15412 __arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15413 {
15414   return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15415 }
15416 
15417 __extension__ extern __inline int16x8_t
15418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15419 __arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15420 {
15421   return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15422 }
15423 
15424 __extension__ extern __inline int32x4_t
15425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15426 __arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15427 {
15428   return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15429 }
15430 
15431 __extension__ extern __inline uint8x16_t
15432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15433 __arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15434 {
15435   return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15436 }
15437 
15438 __extension__ extern __inline uint16x8_t
15439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15440 __arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15441 {
15442   return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15443 }
15444 
15445 __extension__ extern __inline uint32x4_t
15446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15447 __arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15448 {
15449   return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15450 }
15451 
15452 __extension__ extern __inline int16x8_t
15453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15454 __arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
15455 {
15456   return __builtin_mve_vmovlbq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
15457 }
15458 
15459 __extension__ extern __inline int32x4_t
15460 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15461 __arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
15462 {
15463   return __builtin_mve_vmovlbq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
15464 }
15465 
15466 __extension__ extern __inline uint16x8_t
15467 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15468 __arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15469 {
15470   return __builtin_mve_vmovlbq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
15471 }
15472 
15473 __extension__ extern __inline uint32x4_t
15474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15475 __arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
15476 {
15477   return __builtin_mve_vmovlbq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
15478 }
15479 
15480 __extension__ extern __inline int16x8_t
15481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15482 __arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
15483 {
15484   return __builtin_mve_vmovltq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
15485 }
15486 
15487 __extension__ extern __inline int32x4_t
15488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15489 __arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
15490 {
15491   return __builtin_mve_vmovltq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
15492 }
15493 
15494 __extension__ extern __inline uint16x8_t
15495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15496 __arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15497 {
15498   return __builtin_mve_vmovltq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
15499 }
15500 
15501 __extension__ extern __inline uint32x4_t
15502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15503 __arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
15504 {
15505   return __builtin_mve_vmovltq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
15506 }
15507 
15508 __extension__ extern __inline int8x16_t
15509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15510 __arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
15511 {
15512   return __builtin_mve_vmvnq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
15513 }
15514 
15515 __extension__ extern __inline int16x8_t
15516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15517 __arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
15518 {
15519   return __builtin_mve_vmvnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
15520 }
15521 
15522 __extension__ extern __inline int32x4_t
15523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15524 __arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
15525 {
15526   return __builtin_mve_vmvnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
15527 }
15528 
15529 __extension__ extern __inline uint8x16_t
15530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15531 __arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15532 {
15533   return __builtin_mve_vmvnq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
15534 }
15535 
15536 __extension__ extern __inline uint16x8_t
15537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15538 __arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
15539 {
15540   return __builtin_mve_vmvnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
15541 }
15542 
15543 __extension__ extern __inline uint32x4_t
15544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15545 __arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
15546 {
15547   return __builtin_mve_vmvnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
15548 }
15549 
15550 __extension__ extern __inline int16x8_t
15551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15552 __arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p)
15553 {
15554   return __builtin_mve_vmvnq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __imm, __p);
15555 }
15556 
15557 __extension__ extern __inline int32x4_t
15558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15559 __arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p)
15560 {
15561   return __builtin_mve_vmvnq_m_n_sv4si (__arm_vuninitializedq_s32 (), __imm, __p);
15562 }
15563 
15564 __extension__ extern __inline uint16x8_t
15565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15566 __arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p)
15567 {
15568   return __builtin_mve_vmvnq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __imm, __p);
15569 }
15570 
15571 __extension__ extern __inline uint32x4_t
15572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15573 __arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p)
15574 {
15575   return __builtin_mve_vmvnq_m_n_uv4si (__arm_vuninitializedq_u32 (), __imm, __p);
15576 }
15577 
15578 __extension__ extern __inline int8x16_t
15579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15580 __arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15581 {
15582   return __builtin_mve_vornq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15583 }
15584 
15585 __extension__ extern __inline int16x8_t
15586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15587 __arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15588 {
15589   return __builtin_mve_vornq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15590 }
15591 
15592 __extension__ extern __inline int32x4_t
15593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15594 __arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15595 {
15596   return __builtin_mve_vornq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15597 }
15598 
15599 __extension__ extern __inline uint8x16_t
15600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15601 __arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15602 {
15603   return __builtin_mve_vornq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15604 }
15605 
15606 __extension__ extern __inline uint16x8_t
15607 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15608 __arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15609 {
15610   return __builtin_mve_vornq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15611 }
15612 
15613 __extension__ extern __inline uint32x4_t
15614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15615 __arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15616 {
15617   return __builtin_mve_vornq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15618 }
15619 
15620 __extension__ extern __inline int8x16_t
15621 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15622 __arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15623 {
15624   return __builtin_mve_vorrq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15625 }
15626 
15627 __extension__ extern __inline int16x8_t
15628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15629 __arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15630 {
15631   return __builtin_mve_vorrq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15632 }
15633 
15634 __extension__ extern __inline int32x4_t
15635 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15636 __arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15637 {
15638   return __builtin_mve_vorrq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15639 }
15640 
15641 __extension__ extern __inline uint8x16_t
15642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15643 __arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
15644 {
15645   return __builtin_mve_vorrq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15646 }
15647 
15648 __extension__ extern __inline uint16x8_t
15649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15650 __arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
15651 {
15652   return __builtin_mve_vorrq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15653 }
15654 
15655 __extension__ extern __inline uint32x4_t
15656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15657 __arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
15658 {
15659   return __builtin_mve_vorrq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15660 }
15661 
15662 __extension__ extern __inline int8x16_t
15663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15664 __arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p)
15665 {
15666   return __builtin_mve_vrev16q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
15667 }
15668 
15669 __extension__ extern __inline uint8x16_t
15670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15671 __arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15672 {
15673   return __builtin_mve_vrev16q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
15674 }
15675 
15676 __extension__ extern __inline int8x16_t
15677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15678 __arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p)
15679 {
15680   return __builtin_mve_vrev32q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
15681 }
15682 
15683 __extension__ extern __inline int16x8_t
15684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15685 __arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p)
15686 {
15687   return __builtin_mve_vrev32q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
15688 }
15689 
15690 __extension__ extern __inline uint8x16_t
15691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15692 __arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15693 {
15694   return __builtin_mve_vrev32q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
15695 }
15696 
15697 __extension__ extern __inline uint16x8_t
15698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15699 __arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
15700 {
15701   return __builtin_mve_vrev32q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
15702 }
15703 
15704 __extension__ extern __inline int8x16_t
15705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15706 __arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p)
15707 {
15708   return __builtin_mve_vrev64q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
15709 }
15710 
15711 __extension__ extern __inline int16x8_t
15712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15713 __arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p)
15714 {
15715   return __builtin_mve_vrev64q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
15716 }
15717 
15718 __extension__ extern __inline int32x4_t
15719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15720 __arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p)
15721 {
15722   return __builtin_mve_vrev64q_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
15723 }
15724 
15725 __extension__ extern __inline uint8x16_t
15726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15727 __arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
15728 {
15729   return __builtin_mve_vrev64q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
15730 }
15731 
15732 __extension__ extern __inline uint16x8_t
15733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15734 __arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
15735 {
15736   return __builtin_mve_vrev64q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
15737 }
15738 
15739 __extension__ extern __inline uint32x4_t
15740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15741 __arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p)
15742 {
15743   return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
15744 }
15745 
15746 __extension__ extern __inline int8x16_t
15747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15748 __arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15749 {
15750   return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15751 }
15752 
15753 __extension__ extern __inline int16x8_t
15754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15755 __arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15756 {
15757   return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15758 }
15759 
15760 __extension__ extern __inline int32x4_t
15761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15762 __arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15763 {
15764   return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15765 }
15766 
15767 __extension__ extern __inline uint8x16_t
15768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15769 __arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15770 {
15771   return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15772 }
15773 
15774 __extension__ extern __inline uint16x8_t
15775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15776 __arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15777 {
15778   return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15779 }
15780 
15781 __extension__ extern __inline uint32x4_t
15782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15783 __arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15784 {
15785   return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15786 }
15787 
15788 __extension__ extern __inline int16x8_t
15789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15790 __arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
15791 {
15792   return __builtin_mve_vshllbq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
15793 }
15794 
15795 __extension__ extern __inline int32x4_t
15796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15797 __arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
15798 {
15799   return __builtin_mve_vshllbq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
15800 }
15801 
15802 __extension__ extern __inline uint16x8_t
15803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15804 __arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
15805 {
15806   return __builtin_mve_vshllbq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
15807 }
15808 
15809 __extension__ extern __inline uint32x4_t
15810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15811 __arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
15812 {
15813   return __builtin_mve_vshllbq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
15814 }
15815 
15816 __extension__ extern __inline int16x8_t
15817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15818 __arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
15819 {
15820   return __builtin_mve_vshlltq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
15821 }
15822 
15823 __extension__ extern __inline int32x4_t
15824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15825 __arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
15826 {
15827   return __builtin_mve_vshlltq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
15828 }
15829 
15830 __extension__ extern __inline uint16x8_t
15831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15832 __arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
15833 {
15834   return __builtin_mve_vshlltq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
15835 }
15836 
15837 __extension__ extern __inline uint32x4_t
15838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15839 __arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
15840 {
15841   return __builtin_mve_vshlltq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
15842 }
15843 
15844 __extension__ extern __inline int8x16_t
15845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15846 __arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15847 {
15848   return __builtin_mve_vshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
15849 }
15850 
15851 __extension__ extern __inline int16x8_t
15852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15853 __arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15854 {
15855   return __builtin_mve_vshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
15856 }
15857 
15858 __extension__ extern __inline int32x4_t
15859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15860 __arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15861 {
15862   return __builtin_mve_vshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
15863 }
15864 
15865 __extension__ extern __inline uint8x16_t
15866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15867 __arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
15868 {
15869   return __builtin_mve_vshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
15870 }
15871 
15872 __extension__ extern __inline uint16x8_t
15873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15874 __arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
15875 {
15876   return __builtin_mve_vshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
15877 }
15878 
15879 __extension__ extern __inline uint32x4_t
15880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15881 __arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
15882 {
15883   return __builtin_mve_vshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
15884 }
15885 
15886 __extension__ extern __inline int8x16_t
15887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15888 __arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
15889 {
15890   return __builtin_mve_vshlq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
15891 }
15892 
15893 __extension__ extern __inline int16x8_t
15894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15895 __arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
15896 {
15897   return __builtin_mve_vshlq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
15898 }
15899 
15900 __extension__ extern __inline int32x4_t
15901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15902 __arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
15903 {
15904   return __builtin_mve_vshlq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
15905 }
15906 
15907 __extension__ extern __inline uint8x16_t
15908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15909 __arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
15910 {
15911   return __builtin_mve_vshlq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
15912 }
15913 
15914 __extension__ extern __inline uint16x8_t
15915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15916 __arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
15917 {
15918   return __builtin_mve_vshlq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
15919 }
15920 
15921 __extension__ extern __inline uint32x4_t
15922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15923 __arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
15924 {
15925   return __builtin_mve_vshlq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
15926 }
15927 
15928 __extension__ extern __inline int8x16_t
15929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15930 __arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
15931 {
15932   return __builtin_mve_vrshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
15933 }
15934 
15935 __extension__ extern __inline int16x8_t
15936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15937 __arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
15938 {
15939   return __builtin_mve_vrshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
15940 }
15941 
15942 __extension__ extern __inline int32x4_t
15943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15944 __arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
15945 {
15946   return __builtin_mve_vrshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
15947 }
15948 
15949 __extension__ extern __inline uint8x16_t
15950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15951 __arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
15952 {
15953   return __builtin_mve_vrshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
15954 }
15955 
15956 __extension__ extern __inline uint16x8_t
15957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15958 __arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
15959 {
15960   return __builtin_mve_vrshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
15961 }
15962 
15963 __extension__ extern __inline uint32x4_t
15964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15965 __arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
15966 {
15967   return __builtin_mve_vrshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
15968 }
15969 
15970 __extension__ extern __inline int8x16_t
15971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15972 __arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
15973 {
15974   return __builtin_mve_vshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
15975 }
15976 
15977 __extension__ extern __inline int16x8_t
15978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15979 __arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
15980 {
15981   return __builtin_mve_vshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
15982 }
15983 
15984 __extension__ extern __inline int32x4_t
15985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15986 __arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
15987 {
15988   return __builtin_mve_vshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
15989 }
15990 
15991 __extension__ extern __inline uint8x16_t
15992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15993 __arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
15994 {
15995   return __builtin_mve_vshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
15996 }
15997 
15998 __extension__ extern __inline uint16x8_t
15999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16000 __arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
16001 {
16002   return __builtin_mve_vshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
16003 }
16004 
16005 __extension__ extern __inline uint32x4_t
16006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16007 __arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
16008 {
16009   return __builtin_mve_vshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
16010 }
16011 
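/* Vector add/subtract with carry.  The carry flag is kept in bit 29 of
   FPSCR and is accessed through the NZCVQC builtins: the VADCI/VSBCI
   forms use a fixed initial carry and only report the outgoing carry in
   *__carry_out, while the VADC/VSBC forms first seed bit 29 from
   *__carry and then write the resulting carry back.  */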
16012 __extension__ extern __inline int32x4_t
16013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16014 __arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
16015 {
16016   int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b);
16017   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16018   return __res;
16019 }
16020 
16021 __extension__ extern __inline uint32x4_t
16022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16023 __arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
16024 {
16025   uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b);
16026   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16027   return __res;
16028 }
16029 
16030 __extension__ extern __inline int32x4_t
16031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16032 __arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
16033 {
16034   int32x4_t __res =  __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p);
16035   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16036   return __res;
16037 }
16038 
16039 __extension__ extern __inline uint32x4_t
16040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16041 __arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
16042 {
16043   uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p);
16044   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16045   return __res;
16046 }
16047 
16048 __extension__ extern __inline int32x4_t
16049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16050 __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
16051 {
16052   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16053   int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
16054   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16055   return __res;
16056 }
16057 
16058 __extension__ extern __inline uint32x4_t
16059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16060 __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
16061 {
16062   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16063   uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
16064   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16065   return __res;
16066 }
16067 
16068 __extension__ extern __inline int32x4_t
16069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16070 __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
16071 {
16072   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16073   int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
16074   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16075   return __res;
16076 }
16077 
16078 __extension__ extern __inline uint32x4_t
16079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16080 __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
16081 {
16082   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16083   uint32x4_t __res =  __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p);
16084   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16085   return __res;
16086 }
16087 
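/* Illustrative sketch (not part of this header): a 256-bit unsigned
   addition, with each operand held as two uint32x4_t vectors (least
   significant word in lane 0), can chain the carry between the two
   intrinsics above:

     unsigned carry;
     uint32x4_t r_lo = vadciq_u32 (a_lo, b_lo, &carry);
     uint32x4_t r_hi = vadcq_u32 (a_hi, b_hi, &carry);
*/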
16088 __extension__ extern __inline int32x4_t
16089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16090 __arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
16091 {
16092   int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b);
16093   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16094   return __res;
16095 }
16096 
16097 __extension__ extern __inline uint32x4_t
16098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16099 __arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
16100 {
16101   uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b);
16102   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16103   return __res;
16104 }
16105 
16106 __extension__ extern __inline int32x4_t
16107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16108 __arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
16109 {
16110   int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p);
16111   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16112   return __res;
16113 }
16114 
16115 __extension__ extern __inline uint32x4_t
16116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16117 __arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
16118 {
16119   uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p);
16120   *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16121   return __res;
16122 }
16123 
16124 __extension__ extern __inline int32x4_t
16125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16126 __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
16127 {
16128   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16129   int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
16130   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16131   return __res;
16132 }
16133 
16134 __extension__ extern __inline uint32x4_t
16135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16136 __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
16137 {
16138   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16139   uint32x4_t __res =  __builtin_mve_vsbcq_uv4si (__a, __b);
16140   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16141   return __res;
16142 }
16143 
16144 __extension__ extern __inline int32x4_t
16145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16146 __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
16147 {
16148   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16149   int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
16150   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16151   return __res;
16152 }
16153 
16154 __extension__ extern __inline uint32x4_t
16155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16156 __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
16157 {
16158   __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
16159   uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p);
16160   *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
16161   return __res;
16162 }
16163 
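/* vst1q_p and vld1q_z are thin wrappers over the width-specific
   vstrbq_p/vstrhq_p/vstrwq_p and vldrbq_z/vldrhq_z/vldrwq_z intrinsics.  */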
16164 __extension__ extern __inline void
16165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16166 __arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
16167 {
16168   return vstrbq_p_u8 (__addr, __value, __p);
16169 }
16170 
16171 __extension__ extern __inline void
16172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16173 __arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
16174 {
16175   return vstrbq_p_s8 (__addr, __value, __p);
16176 }
16177 
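/* The interleaving vst2q/vld2q/vld4q intrinsics marshal their tuple
   arguments through a union so that the int8x16x2_t-style structure
   types can be reinterpreted as the opaque __builtin_neon_oi and
   __builtin_neon_xi modes expected by the builtins.  */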
16178 __extension__ extern __inline void
16179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16180 __arm_vst2q_s8 (int8_t * __addr, int8x16x2_t __value)
16181 {
16182   union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
16183   __rv.__i = __value;
16184   __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
16185 }
16186 
16187 __extension__ extern __inline void
16188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16189 __arm_vst2q_u8 (uint8_t * __addr, uint8x16x2_t __value)
16190 {
16191   union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
16192   __rv.__i = __value;
16193   __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
16194 }
16195 
16196 __extension__ extern __inline uint8x16_t
16197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16198 __arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p)
16199 {
16200   return vldrbq_z_u8 ( __base, __p);
16201 }
16202 
16203 __extension__ extern __inline int8x16_t
16204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16205 __arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p)
16206 {
16207   return vldrbq_z_s8 ( __base, __p);
16208 }
16209 
16210 __extension__ extern __inline int8x16x2_t
16211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16212 __arm_vld2q_s8 (int8_t const * __addr)
16213 {
16214   union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
16215   __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
16216   return __rv.__i;
16217 }
16218 
16219 __extension__ extern __inline uint8x16x2_t
16220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16221 __arm_vld2q_u8 (uint8_t const * __addr)
16222 {
16223   union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
16224   __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
16225   return __rv.__i;
16226 }
16227 
16228 __extension__ extern __inline int8x16x4_t
16229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16230 __arm_vld4q_s8 (int8_t const * __addr)
16231 {
16232   union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
16233   __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
16234   return __rv.__i;
16235 }
16236 
16237 __extension__ extern __inline uint8x16x4_t
16238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16239 __arm_vld4q_u8 (uint8_t const * __addr)
16240 {
16241   union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
16242   __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
16243   return __rv.__i;
16244 }
16245 
16246 __extension__ extern __inline void
16247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16248 __arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
16249 {
16250   return vstrhq_p_u16 (__addr, __value, __p);
16251 }
16252 
16253 __extension__ extern __inline void
16254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16255 __arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
16256 {
16257   return vstrhq_p_s16 (__addr, __value, __p);
16258 }
16259 
16260 __extension__ extern __inline void
16261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16262 __arm_vst2q_s16 (int16_t * __addr, int16x8x2_t __value)
16263 {
16264   union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
16265   __rv.__i = __value;
16266   __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
16267 }
16268 
16269 __extension__ extern __inline void
16270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16271 __arm_vst2q_u16 (uint16_t * __addr, uint16x8x2_t __value)
16272 {
16273   union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
16274   __rv.__i = __value;
16275   __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
16276 }
16277 
16278 __extension__ extern __inline uint16x8_t
16279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16280 __arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p)
16281 {
16282   return vldrhq_z_u16 ( __base, __p);
16283 }
16284 
16285 __extension__ extern __inline int16x8_t
16286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16287 __arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p)
16288 {
16289   return vldrhq_z_s16 ( __base, __p);
16290 }
16291 
16292 __extension__ extern __inline int16x8x2_t
16293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16294 __arm_vld2q_s16 (int16_t const * __addr)
16295 {
16296   union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
16297   __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
16298   return __rv.__i;
16299 }
16300 
16301 __extension__ extern __inline uint16x8x2_t
16302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16303 __arm_vld2q_u16 (uint16_t const * __addr)
16304 {
16305   union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
16306   __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
16307   return __rv.__i;
16308 }
16309 
16310 __extension__ extern __inline int16x8x4_t
16311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16312 __arm_vld4q_s16 (int16_t const * __addr)
16313 {
16314   union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
16315   __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
16316   return __rv.__i;
16317 }
16318 
16319 __extension__ extern __inline uint16x8x4_t
16320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16321 __arm_vld4q_u16 (uint16_t const * __addr)
16322 {
16323   union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
16324   __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
16325   return __rv.__i;
16326 }
16327 
16328 __extension__ extern __inline void
16329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16330 __arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
16331 {
16332   return vstrwq_p_u32 (__addr, __value, __p);
16333 }
16334 
16335 __extension__ extern __inline void
16336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16337 __arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
16338 {
16339   return vstrwq_p_s32 (__addr, __value, __p);
16340 }
16341 
16342 __extension__ extern __inline void
16343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16344 __arm_vst2q_s32 (int32_t * __addr, int32x4x2_t __value)
16345 {
16346   union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
16347   __rv.__i = __value;
16348   __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
16349 }
16350 
16351 __extension__ extern __inline void
16352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16353 __arm_vst2q_u32 (uint32_t * __addr, uint32x4x2_t __value)
16354 {
16355   union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
16356   __rv.__i = __value;
16357   __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
16358 }
16359 
16360 __extension__ extern __inline uint32x4_t
16361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16362 __arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p)
16363 {
16364   return vldrwq_z_u32 ( __base, __p);
16365 }
16366 
16367 __extension__ extern __inline int32x4_t
16368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16369 __arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p)
16370 {
16371   return vldrwq_z_s32 ( __base, __p);
16372 }
16373 
16374 __extension__ extern __inline int32x4x2_t
16375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16376 __arm_vld2q_s32 (int32_t const * __addr)
16377 {
16378   union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
16379   __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
16380   return __rv.__i;
16381 }
16382 
16383 __extension__ extern __inline uint32x4x2_t
16384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16385 __arm_vld2q_u32 (uint32_t const * __addr)
16386 {
16387   union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
16388   __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
16389   return __rv.__i;
16390 }
16391 
16392 __extension__ extern __inline int32x4x4_t
16393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16394 __arm_vld4q_s32 (int32_t const * __addr)
16395 {
16396   union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
16397   __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
16398   return __rv.__i;
16399 }
16400 
16401 __extension__ extern __inline uint32x4x4_t
16402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16403 __arm_vld4q_u32 (uint32_t const * __addr)
16404 {
16405   union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
16406   __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
16407   return __rv.__i;
16408 }
16409 
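/* Lane insert/extract.  __ARM_CHECK_LANEQ rejects out-of-range constant
   lane indices at compile time; the lane itself is then accessed with
   GCC's vector subscripting via __ARM_LANEQ.  */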
16410 __extension__ extern __inline int16x8_t
16411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16412 __arm_vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __idx)
16413 {
16414   __ARM_CHECK_LANEQ (__b, __idx);
16415   __b[__ARM_LANEQ(__b,__idx)] = __a;
16416   return __b;
16417 }
16418 
16419 __extension__ extern __inline int32x4_t
16420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16421 __arm_vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __idx)
16422 {
16423   __ARM_CHECK_LANEQ (__b, __idx);
16424   __b[__ARM_LANEQ(__b,__idx)] = __a;
16425   return __b;
16426 }
16427 
16428 __extension__ extern __inline int8x16_t
16429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16430 __arm_vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __idx)
16431 {
16432   __ARM_CHECK_LANEQ (__b, __idx);
16433   __b[__ARM_LANEQ(__b,__idx)] = __a;
16434   return __b;
16435 }
16436 
16437 __extension__ extern __inline int64x2_t
16438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16439 __arm_vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __idx)
16440 {
16441   __ARM_CHECK_LANEQ (__b, __idx);
16442   __b[__ARM_LANEQ(__b,__idx)] = __a;
16443   return __b;
16444 }
16445 
16446 __extension__ extern __inline uint8x16_t
16447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16448 __arm_vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __idx)
16449 {
16450   __ARM_CHECK_LANEQ (__b, __idx);
16451   __b[__ARM_LANEQ(__b,__idx)] = __a;
16452   return __b;
16453 }
16454 
16455 __extension__ extern __inline uint16x8_t
16456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16457 __arm_vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __idx)
16458 {
16459   __ARM_CHECK_LANEQ (__b, __idx);
16460   __b[__ARM_LANEQ(__b,__idx)] = __a;
16461   return __b;
16462 }
16463 
16464 __extension__ extern __inline uint32x4_t
16465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16466 __arm_vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __idx)
16467 {
16468   __ARM_CHECK_LANEQ (__b, __idx);
16469   __b[__ARM_LANEQ(__b,__idx)] = __a;
16470   return __b;
16471 }
16472 
16473 __extension__ extern __inline uint64x2_t
16474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16475 __arm_vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __idx)
16476 {
16477   __ARM_CHECK_LANEQ (__b, __idx);
16478   __b[__ARM_LANEQ(__b,__idx)] = __a;
16479   return __b;
16480 }
16481 
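/* Lane extract: return lane __idx of __a, with the same compile-time
   lane-range check as the vsetq_lane functions above.  */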
16482 __extension__ extern __inline int16_t
16483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16484 __arm_vgetq_lane_s16 (int16x8_t __a, const int __idx)
16485 {
16486   __ARM_CHECK_LANEQ (__a, __idx);
16487   return __a[__ARM_LANEQ(__a,__idx)];
16488 }
16489 
16490 __extension__ extern __inline int32_t
16491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16492 __arm_vgetq_lane_s32 (int32x4_t __a, const int __idx)
16493 {
16494   __ARM_CHECK_LANEQ (__a, __idx);
16495   return __a[__ARM_LANEQ(__a,__idx)];
16496 }
16497 
16498 __extension__ extern __inline int8_t
16499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16500 __arm_vgetq_lane_s8 (int8x16_t __a, const int __idx)
16501 {
16502   __ARM_CHECK_LANEQ (__a, __idx);
16503   return __a[__ARM_LANEQ(__a,__idx)];
16504 }
16505 
16506 __extension__ extern __inline int64_t
16507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16508 __arm_vgetq_lane_s64 (int64x2_t __a, const int __idx)
16509 {
16510   __ARM_CHECK_LANEQ (__a, __idx);
16511   return __a[__ARM_LANEQ(__a,__idx)];
16512 }
16513 
16514 __extension__ extern __inline uint8_t
16515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16516 __arm_vgetq_lane_u8 (uint8x16_t __a, const int __idx)
16517 {
16518   __ARM_CHECK_LANEQ (__a, __idx);
16519   return __a[__ARM_LANEQ(__a,__idx)];
16520 }
16521 
16522 __extension__ extern __inline uint16_t
16523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16524 __arm_vgetq_lane_u16 (uint16x8_t __a, const int __idx)
16525 {
16526   __ARM_CHECK_LANEQ (__a, __idx);
16527   return __a[__ARM_LANEQ(__a,__idx)];
16528 }
16529 
16530 __extension__ extern __inline uint32_t
16531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16532 __arm_vgetq_lane_u32 (uint32x4_t __a, const int __idx)
16533 {
16534   __ARM_CHECK_LANEQ (__a, __idx);
16535   return __a[__ARM_LANEQ(__a,__idx)];
16536 }
16537 
16538 __extension__ extern __inline uint64_t
16539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16540 __arm_vgetq_lane_u64 (uint64x2_t __a, const int __idx)
16541 {
16542   __ARM_CHECK_LANEQ (__a, __idx);
16543   return __a[__ARM_LANEQ(__a,__idx)];
16544 }
16545 
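/* Scalar shift intrinsics.  __arm_lsll and __arm_asrl are plain C shifts of
   a 64-bit value by a 32-bit count; the remaining helpers map to the MVE
   saturating ('q') and/or rounding ('r') scalar shift builtins, with the
   _sat48 forms saturating to 48 bits.  */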
16546 __extension__ extern __inline uint64_t
16547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16548 __arm_lsll (uint64_t value, int32_t shift)
16549 {
16550   return (value << shift);
16551 }
16552 
16553 __extension__ extern __inline int64_t
16554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16555 __arm_asrl (int64_t value, int32_t shift)
16556 {
16557   return (value >> shift);
16558 }
16559 
16560 __extension__ extern __inline uint64_t
16561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16562 __arm_uqrshll (uint64_t value, int32_t shift)
16563 {
16564   return __builtin_mve_uqrshll_sat64_di (value, shift);
16565 }
16566 
16567 __extension__ extern __inline uint64_t
16568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16569 __arm_uqrshll_sat48 (uint64_t value, int32_t shift)
16570 {
16571   return __builtin_mve_uqrshll_sat48_di (value, shift);
16572 }
16573 
16574 __extension__ extern __inline int64_t
16575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16576 __arm_sqrshrl (int64_t value, int32_t shift)
16577 {
16578   return __builtin_mve_sqrshrl_sat64_di (value, shift);
16579 }
16580 
16581 __extension__ extern __inline int64_t
16582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16583 __arm_sqrshrl_sat48 (int64_t value, int32_t shift)
16584 {
16585   return __builtin_mve_sqrshrl_sat48_di (value, shift);
16586 }
16587 
16588 __extension__ extern __inline uint64_t
16589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16590 __arm_uqshll (uint64_t value, const int shift)
16591 {
16592   return __builtin_mve_uqshll_di (value, shift);
16593 }
16594 
16595 __extension__ extern __inline uint64_t
16596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16597 __arm_urshrl (uint64_t value, const int shift)
16598 {
16599   return __builtin_mve_urshrl_di (value, shift);
16600 }
16601 
16602 __extension__ extern __inline int64_t
16603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16604 __arm_srshrl (int64_t value, const int shift)
16605 {
16606   return __builtin_mve_srshrl_di (value, shift);
16607 }
16608 
16609 __extension__ extern __inline int64_t
16610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16611 __arm_sqshll (int64_t value, const int shift)
16612 {
16613   return __builtin_mve_sqshll_di (value, shift);
16614 }
16615 
16616 __extension__ extern __inline uint32_t
16617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16618 __arm_uqrshl (uint32_t value, int32_t shift)
16619 {
16620   return __builtin_mve_uqrshl_si (value, shift);
16621 }
16622 
16623 __extension__ extern __inline int32_t
16624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16625 __arm_sqrshr (int32_t value, int32_t shift)
16626 {
16627   return __builtin_mve_sqrshr_si (value, shift);
16628 }
16629 
16630 __extension__ extern __inline uint32_t
16631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16632 __arm_uqshl (uint32_t value, const int shift)
16633 {
16634   return __builtin_mve_uqshl_si (value, shift);
16635 }
16636 
16637 __extension__ extern __inline uint32_t
16638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16639 __arm_urshr (uint32_t value, const int shift)
16640 {
16641   return __builtin_mve_urshr_si (value, shift);
16642 }
16643 
16644 __extension__ extern __inline int32_t
16645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16646 __arm_sqshl (int32_t value, const int shift)
16647 {
16648   return __builtin_mve_sqshl_si (value, shift);
16649 }
16650 
16651 __extension__ extern __inline int32_t
16652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16653 __arm_srshr (int32_t value, const int shift)
16654 {
16655   return __builtin_mve_srshr_si (value, shift);
16656 }
16657 
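/* Predicated whole-vector shift-left-with-carry: __a is shifted left by
   __imm under predicate __p, with the carry word *__b supplying the bits
   shifted in and receiving the bits shifted out.  One builtin computes the
   shifted vector, a second the updated carry.  */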
16658 __extension__ extern __inline int8x16_t
16659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16660 __arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16661 {
16662   int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p);
16663   *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p);
16664   return __res;
16665 }
16666 
16667 __extension__ extern __inline uint8x16_t
16668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16669 __arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16670 {
16671   uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p);
16672   *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p);
16673   return __res;
16674 }
16675 
16676 __extension__ extern __inline int16x8_t
16677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16678 __arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16679 {
16680   int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p);
16681   *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p);
16682   return __res;
16683 }
16684 
16685 __extension__ extern __inline uint16x8_t
16686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16687 __arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16688 {
16689   uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p);
16690   *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p);
16691   return __res;
16692 }
16693 
16694 __extension__ extern __inline int32x4_t
16695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16696 __arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16697 {
16698   int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p);
16699   *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p);
16700   return __res;
16701 }
16702 
16703 __extension__ extern __inline uint32x4_t
16704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16705 __arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
16706 {
16707   uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p);
16708   *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p);
16709   return __res;
16710 }
16711 
16712 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
16713 
16714 __extension__ extern __inline void
16715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16716 __arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value)
16717 {
16718   union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
16719   __rv.__i = __value;
16720   __builtin_mve_vst4qv8hf (__addr, __rv.__o);
16721 }
16722 
16723 __extension__ extern __inline void
16724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16725 __arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value)
16726 {
16727   union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
16728   __rv.__i = __value;
16729   __builtin_mve_vst4qv4sf (__addr, __rv.__o);
16730 }
16731 
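/* Round each lane to an integral value in floating-point format: vrndxq uses
   the current rounding mode (raising Inexact), vrndq truncates toward zero,
   vrndpq rounds toward +Inf, vrndnq to nearest (ties to even), vrndmq toward
   -Inf and vrndaq to nearest (ties away from zero).  */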
16732 __extension__ extern __inline float16x8_t
16733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16734 __arm_vrndxq_f16 (float16x8_t __a)
16735 {
16736   return __builtin_mve_vrndxq_fv8hf (__a);
16737 }
16738 
16739 __extension__ extern __inline float32x4_t
16740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16741 __arm_vrndxq_f32 (float32x4_t __a)
16742 {
16743   return __builtin_mve_vrndxq_fv4sf (__a);
16744 }
16745 
16746 __extension__ extern __inline float16x8_t
16747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16748 __arm_vrndq_f16 (float16x8_t __a)
16749 {
16750   return __builtin_mve_vrndq_fv8hf (__a);
16751 }
16752 
16753 __extension__ extern __inline float32x4_t
16754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16755 __arm_vrndq_f32 (float32x4_t __a)
16756 {
16757   return __builtin_mve_vrndq_fv4sf (__a);
16758 }
16759 
16760 __extension__ extern __inline float16x8_t
16761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16762 __arm_vrndpq_f16 (float16x8_t __a)
16763 {
16764   return __builtin_mve_vrndpq_fv8hf (__a);
16765 }
16766 
16767 __extension__ extern __inline float32x4_t
16768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16769 __arm_vrndpq_f32 (float32x4_t __a)
16770 {
16771   return __builtin_mve_vrndpq_fv4sf (__a);
16772 }
16773 
16774 __extension__ extern __inline float16x8_t
16775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16776 __arm_vrndnq_f16 (float16x8_t __a)
16777 {
16778   return __builtin_mve_vrndnq_fv8hf (__a);
16779 }
16780 
16781 __extension__ extern __inline float32x4_t
16782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16783 __arm_vrndnq_f32 (float32x4_t __a)
16784 {
16785   return __builtin_mve_vrndnq_fv4sf (__a);
16786 }
16787 
16788 __extension__ extern __inline float16x8_t
16789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16790 __arm_vrndmq_f16 (float16x8_t __a)
16791 {
16792   return __builtin_mve_vrndmq_fv8hf (__a);
16793 }
16794 
16795 __extension__ extern __inline float32x4_t
16796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16797 __arm_vrndmq_f32 (float32x4_t __a)
16798 {
16799   return __builtin_mve_vrndmq_fv4sf (__a);
16800 }
16801 
16802 __extension__ extern __inline float16x8_t
16803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16804 __arm_vrndaq_f16 (float16x8_t __a)
16805 {
16806   return __builtin_mve_vrndaq_fv8hf (__a);
16807 }
16808 
16809 __extension__ extern __inline float32x4_t
16810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16811 __arm_vrndaq_f32 (float32x4_t __a)
16812 {
16813   return __builtin_mve_vrndaq_fv4sf (__a);
16814 }
16815 
16816 __extension__ extern __inline float16x8_t
16817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16818 __arm_vrev64q_f16 (float16x8_t __a)
16819 {
16820   return __builtin_mve_vrev64q_fv8hf (__a);
16821 }
16822 
16823 __extension__ extern __inline float32x4_t
16824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16825 __arm_vrev64q_f32 (float32x4_t __a)
16826 {
16827   return __builtin_mve_vrev64q_fv4sf (__a);
16828 }
16829 
16830 __extension__ extern __inline float16x8_t
16831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16832 __arm_vnegq_f16 (float16x8_t __a)
16833 {
16834   return __builtin_mve_vnegq_fv8hf (__a);
16835 }
16836 
16837 __extension__ extern __inline float32x4_t
16838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16839 __arm_vnegq_f32 (float32x4_t __a)
16840 {
16841   return __builtin_mve_vnegq_fv4sf (__a);
16842 }
16843 
16844 __extension__ extern __inline float16x8_t
16845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16846 __arm_vdupq_n_f16 (float16_t __a)
16847 {
16848   return __builtin_mve_vdupq_n_fv8hf (__a);
16849 }
16850 
16851 __extension__ extern __inline float32x4_t
16852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16853 __arm_vdupq_n_f32 (float32_t __a)
16854 {
16855   return __builtin_mve_vdupq_n_fv4sf (__a);
16856 }
16857 
16858 __extension__ extern __inline float16x8_t
16859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16860 __arm_vabsq_f16 (float16x8_t __a)
16861 {
16862   return __builtin_mve_vabsq_fv8hf (__a);
16863 }
16864 
16865 __extension__ extern __inline float32x4_t
16866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16867 __arm_vabsq_f32 (float32x4_t __a)
16868 {
16869   return __builtin_mve_vabsq_fv4sf (__a);
16870 }
16871 
16872 __extension__ extern __inline float16x8_t
16873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16874 __arm_vrev32q_f16 (float16x8_t __a)
16875 {
16876   return __builtin_mve_vrev32q_fv8hf (__a);
16877 }
16878 
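/* vcvttq_f32_f16 / vcvtbq_f32_f16 widen the top / bottom half-precision
   lanes of __a to single precision.  */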
16879 __extension__ extern __inline float32x4_t
16880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16881 __arm_vcvttq_f32_f16 (float16x8_t __a)
16882 {
16883   return __builtin_mve_vcvttq_f32_f16v4sf (__a);
16884 }
16885 
16886 __extension__ extern __inline float32x4_t
16887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16888 __arm_vcvtbq_f32_f16 (float16x8_t __a)
16889 {
16890   return __builtin_mve_vcvtbq_f32_f16v4sf (__a);
16891 }
16892 
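/* Conversions between integer and floating-point vectors.  The plain
   float-to-integer vcvtq forms truncate toward zero; vcvtaq, vcvtnq, vcvtpq
   and vcvtmq round to nearest (ties away), to nearest (ties to even),
   toward +Inf and toward -Inf respectively.  */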
16893 __extension__ extern __inline float16x8_t
16894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16895 __arm_vcvtq_f16_s16 (int16x8_t __a)
16896 {
16897   return __builtin_mve_vcvtq_to_f_sv8hf (__a);
16898 }
16899 
16900 __extension__ extern __inline float32x4_t
16901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16902 __arm_vcvtq_f32_s32 (int32x4_t __a)
16903 {
16904   return __builtin_mve_vcvtq_to_f_sv4sf (__a);
16905 }
16906 
16907 __extension__ extern __inline float16x8_t
16908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16909 __arm_vcvtq_f16_u16 (uint16x8_t __a)
16910 {
16911   return __builtin_mve_vcvtq_to_f_uv8hf (__a);
16912 }
16913 
16914 __extension__ extern __inline float32x4_t
16915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16916 __arm_vcvtq_f32_u32 (uint32x4_t __a)
16917 {
16918   return __builtin_mve_vcvtq_to_f_uv4sf (__a);
16919 }
16920 
16921 __extension__ extern __inline int16x8_t
16922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16923 __arm_vcvtq_s16_f16 (float16x8_t __a)
16924 {
16925   return __builtin_mve_vcvtq_from_f_sv8hi (__a);
16926 }
16927 
16928 __extension__ extern __inline int32x4_t
16929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16930 __arm_vcvtq_s32_f32 (float32x4_t __a)
16931 {
16932   return __builtin_mve_vcvtq_from_f_sv4si (__a);
16933 }
16934 
16935 __extension__ extern __inline uint16x8_t
16936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16937 __arm_vcvtq_u16_f16 (float16x8_t __a)
16938 {
16939   return __builtin_mve_vcvtq_from_f_uv8hi (__a);
16940 }
16941 
16942 __extension__ extern __inline uint32x4_t
16943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16944 __arm_vcvtq_u32_f32 (float32x4_t __a)
16945 {
16946   return __builtin_mve_vcvtq_from_f_uv4si (__a);
16947 }
16948 
16949 __extension__ extern __inline uint16x8_t
16950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16951 __arm_vcvtpq_u16_f16 (float16x8_t __a)
16952 {
16953   return __builtin_mve_vcvtpq_uv8hi (__a);
16954 }
16955 
16956 __extension__ extern __inline uint32x4_t
16957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16958 __arm_vcvtpq_u32_f32 (float32x4_t __a)
16959 {
16960   return __builtin_mve_vcvtpq_uv4si (__a);
16961 }
16962 
16963 __extension__ extern __inline uint16x8_t
16964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16965 __arm_vcvtnq_u16_f16 (float16x8_t __a)
16966 {
16967   return __builtin_mve_vcvtnq_uv8hi (__a);
16968 }
16969 
16970 __extension__ extern __inline uint32x4_t
16971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16972 __arm_vcvtnq_u32_f32 (float32x4_t __a)
16973 {
16974   return __builtin_mve_vcvtnq_uv4si (__a);
16975 }
16976 
16977 __extension__ extern __inline uint16x8_t
16978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16979 __arm_vcvtmq_u16_f16 (float16x8_t __a)
16980 {
16981   return __builtin_mve_vcvtmq_uv8hi (__a);
16982 }
16983 
16984 __extension__ extern __inline uint32x4_t
16985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16986 __arm_vcvtmq_u32_f32 (float32x4_t __a)
16987 {
16988   return __builtin_mve_vcvtmq_uv4si (__a);
16989 }
16990 
16991 __extension__ extern __inline uint16x8_t
16992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16993 __arm_vcvtaq_u16_f16 (float16x8_t __a)
16994 {
16995   return __builtin_mve_vcvtaq_uv8hi (__a);
16996 }
16997 
16998 __extension__ extern __inline uint32x4_t
16999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17000 __arm_vcvtaq_u32_f32 (float32x4_t __a)
17001 {
17002   return __builtin_mve_vcvtaq_uv4si (__a);
17003 }
17004 
17005 __extension__ extern __inline int16x8_t
17006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17007 __arm_vcvtaq_s16_f16 (float16x8_t __a)
17008 {
17009   return __builtin_mve_vcvtaq_sv8hi (__a);
17010 }
17011 
17012 __extension__ extern __inline int32x4_t
17013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17014 __arm_vcvtaq_s32_f32 (float32x4_t __a)
17015 {
17016   return __builtin_mve_vcvtaq_sv4si (__a);
17017 }
17018 
17019 __extension__ extern __inline int16x8_t
17020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17021 __arm_vcvtnq_s16_f16 (float16x8_t __a)
17022 {
17023   return __builtin_mve_vcvtnq_sv8hi (__a);
17024 }
17025 
17026 __extension__ extern __inline int32x4_t
17027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17028 __arm_vcvtnq_s32_f32 (float32x4_t __a)
17029 {
17030   return __builtin_mve_vcvtnq_sv4si (__a);
17031 }
17032 
17033 __extension__ extern __inline int16x8_t
17034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17035 __arm_vcvtpq_s16_f16 (float16x8_t __a)
17036 {
17037   return __builtin_mve_vcvtpq_sv8hi (__a);
17038 }
17039 
17040 __extension__ extern __inline int32x4_t
17041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17042 __arm_vcvtpq_s32_f32 (float32x4_t __a)
17043 {
17044   return __builtin_mve_vcvtpq_sv4si (__a);
17045 }
17046 
17047 __extension__ extern __inline int16x8_t
17048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17049 __arm_vcvtmq_s16_f16 (float16x8_t __a)
17050 {
17051   return __builtin_mve_vcvtmq_sv8hi (__a);
17052 }
17053 
17054 __extension__ extern __inline int32x4_t
17055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17056 __arm_vcvtmq_s32_f32 (float32x4_t __a)
17057 {
17058   return __builtin_mve_vcvtmq_sv4si (__a);
17059 }
17060 
17061 __extension__ extern __inline float16x8_t
17062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17063 __arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
17064 {
17065   return __builtin_mve_vsubq_n_fv8hf (__a, __b);
17066 }
17067 
17068 __extension__ extern __inline float32x4_t
17069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17070 __arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
17071 {
17072   return __builtin_mve_vsubq_n_fv4sf (__a, __b);
17073 }
17074 
17075 __extension__ extern __inline float16x8_t
17076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17077 __arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
17078 {
17079   return __builtin_mve_vbrsrq_n_fv8hf (__a, __b);
17080 }
17081 
17082 __extension__ extern __inline float32x4_t
17083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17084 __arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b)
17085 {
17086   return __builtin_mve_vbrsrq_n_fv4sf (__a, __b);
17087 }
17088 
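/* Fixed-point conversions: __imm6 is the number of fractional bits
   (1..16 for 16-bit lanes, 1..32 for 32-bit lanes).  */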
17089 __extension__ extern __inline float16x8_t
17090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17091 __arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6)
17092 {
17093   return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6);
17094 }
17095 
17096 __extension__ extern __inline float32x4_t
17097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17098 __arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6)
17099 {
17100   return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6);
17101 }
17102 
17103 __extension__ extern __inline float16x8_t
17104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17105 __arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6)
17106 {
17107   return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6);
17108 }
17109 
17110 __extension__ extern __inline float32x4_t
17111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17112 __arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6)
17113 {
17114   return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6);
17115 }
17116 
17117 __extension__ extern __inline float16x8_t
17118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17119 __arm_vcreateq_f16 (uint64_t __a, uint64_t __b)
17120 {
17121   return __builtin_mve_vcreateq_fv8hf (__a, __b);
17122 }
17123 
17124 __extension__ extern __inline float32x4_t
17125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17126 __arm_vcreateq_f32 (uint64_t __a, uint64_t __b)
17127 {
17128   return __builtin_mve_vcreateq_fv4sf (__a, __b);
17129 }
17130 
17131 __extension__ extern __inline int16x8_t
17132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17133 __arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6)
17134 {
17135   return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6);
17136 }
17137 
17138 __extension__ extern __inline int32x4_t
17139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17140 __arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6)
17141 {
17142   return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6);
17143 }
17144 
17145 __extension__ extern __inline uint16x8_t
17146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17147 __arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6)
17148 {
17149   return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6);
17150 }
17151 
17152 __extension__ extern __inline uint32x4_t
17153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17154 __arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6)
17155 {
17156   return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6);
17157 }
17158 
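/* Floating-point compares return an mve_pred16_t lane mask; the _n forms
   compare every lane of __a against the scalar __b.  */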
17159 __extension__ extern __inline mve_pred16_t
17160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17161 __arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b)
17162 {
17163   return __builtin_mve_vcmpneq_n_fv8hf (__a, __b);
17164 }
17165 
17166 __extension__ extern __inline mve_pred16_t
17167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17168 __arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b)
17169 {
17170   return __builtin_mve_vcmpneq_fv8hf (__a, __b);
17171 }
17172 
17173 __extension__ extern __inline mve_pred16_t
17174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17175 __arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b)
17176 {
17177   return __builtin_mve_vcmpltq_n_fv8hf (__a, __b);
17178 }
17179 
17180 __extension__ extern __inline mve_pred16_t
17181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17182 __arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b)
17183 {
17184   return __builtin_mve_vcmpltq_fv8hf (__a, __b);
17185 }
17186 
17187 __extension__ extern __inline mve_pred16_t
17188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17189 __arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b)
17190 {
17191   return __builtin_mve_vcmpleq_n_fv8hf (__a, __b);
17192 }
17193 
17194 __extension__ extern __inline mve_pred16_t
17195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17196 __arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b)
17197 {
17198   return __builtin_mve_vcmpleq_fv8hf (__a, __b);
17199 }
17200 
17201 __extension__ extern __inline mve_pred16_t
17202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17203 __arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b)
17204 {
17205   return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b);
17206 }
17207 
17208 __extension__ extern __inline mve_pred16_t
17209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17210 __arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b)
17211 {
17212   return __builtin_mve_vcmpgtq_fv8hf (__a, __b);
17213 }
17214 
17215 __extension__ extern __inline mve_pred16_t
17216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17217 __arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b)
17218 {
17219   return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b);
17220 }
17221 
17222 __extension__ extern __inline mve_pred16_t
17223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17224 __arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b)
17225 {
17226   return __builtin_mve_vcmpgeq_fv8hf (__a, __b);
17227 }
17228 
17229 __extension__ extern __inline mve_pred16_t
17230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17231 __arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b)
17232 {
17233   return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b);
17234 }
17235 
17236 __extension__ extern __inline mve_pred16_t
17237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17238 __arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
17239 {
17240   return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
17241 }
17242 
17243 __extension__ extern __inline float16x8_t
17244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17245 __arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
17246 {
17247   return __builtin_mve_vsubq_fv8hf (__a, __b);
17248 }
17249 
17250 __extension__ extern __inline float16x8_t
17251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17252 __arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
17253 {
17254   return __builtin_mve_vorrq_fv8hf (__a, __b);
17255 }
17256 
17257 __extension__ extern __inline float16x8_t
17258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17259 __arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
17260 {
17261   return __builtin_mve_vornq_fv8hf (__a, __b);
17262 }
17263 
17264 __extension__ extern __inline float16x8_t
17265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17266 __arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
17267 {
17268   return __builtin_mve_vmulq_n_fv8hf (__a, __b);
17269 }
17270 
17271 __extension__ extern __inline float16x8_t
17272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17273 __arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
17274 {
17275   return __builtin_mve_vmulq_fv8hf (__a, __b);
17276 }
17277 
17278 __extension__ extern __inline float16_t
17279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17280 __arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
17281 {
17282   return __builtin_mve_vminnmvq_fv8hf (__a, __b);
17283 }
17284 
17285 __extension__ extern __inline float16x8_t
17286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17287 __arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b)
17288 {
17289   return __builtin_mve_vminnmq_fv8hf (__a, __b);
17290 }
17291 
17292 __extension__ extern __inline float16_t
17293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17294 __arm_vminnmavq_f16 (float16_t __a, float16x8_t __b)
17295 {
17296   return __builtin_mve_vminnmavq_fv8hf (__a, __b);
17297 }
17298 
17299 __extension__ extern __inline float16x8_t
17300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17301 __arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b)
17302 {
17303   return __builtin_mve_vminnmaq_fv8hf (__a, __b);
17304 }
17305 
17306 __extension__ extern __inline float16_t
17307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17308 __arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b)
17309 {
17310   return __builtin_mve_vmaxnmvq_fv8hf (__a, __b);
17311 }
17312 
17313 __extension__ extern __inline float16x8_t
17314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17315 __arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
17316 {
17317   return __builtin_mve_vmaxnmq_fv8hf (__a, __b);
17318 }
17319 
17320 __extension__ extern __inline float16_t
17321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17322 __arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b)
17323 {
17324   return __builtin_mve_vmaxnmavq_fv8hf (__a, __b);
17325 }
17326 
17327 __extension__ extern __inline float16x8_t
17328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17329 __arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
17330 {
17331   return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
17332 }
17333 
17334 __extension__ extern __inline float16x8_t
17335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17336 __arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
17337 {
17338   return __builtin_mve_veorq_fv8hf (__a, __b);
17339 }
17340 
17341 __extension__ extern __inline float16x8_t
17342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17343 __arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
17344 {
17345   return __builtin_mve_vcmulq_rot90_fv8hf (__a, __b);
17346 }
17347 
17348 __extension__ extern __inline float16x8_t
17349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17350 __arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
17351 {
17352   return __builtin_mve_vcmulq_rot270_fv8hf (__a, __b);
17353 }
17354 
17355 __extension__ extern __inline float16x8_t
17356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17357 __arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
17358 {
17359   return __builtin_mve_vcmulq_rot180_fv8hf (__a, __b);
17360 }
17361 
17362 __extension__ extern __inline float16x8_t
17363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17364 __arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
17365 {
17366   return __builtin_mve_vcmulq_fv8hf (__a, __b);
17367 }
17368 
17369 __extension__ extern __inline float16x8_t
17370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17371 __arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
17372 {
17373   return __builtin_mve_vcaddq_rot90_fv8hf (__a, __b);
17374 }
17375 
17376 __extension__ extern __inline float16x8_t
17377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17378 __arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
17379 {
17380   return __builtin_mve_vcaddq_rot270_fv8hf (__a, __b);
17381 }
17382 
17383 __extension__ extern __inline float16x8_t
17384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17385 __arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
17386 {
17387   return __builtin_mve_vbicq_fv8hf (__a, __b);
17388 }
17389 
17390 __extension__ extern __inline float16x8_t
17391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17392 __arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
17393 {
17394   return __builtin_mve_vandq_fv8hf (__a, __b);
17395 }
17396 
17397 __extension__ extern __inline float16x8_t
17398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17399 __arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
17400 {
17401   return __builtin_mve_vaddq_n_fv8hf (__a, __b);
17402 }
17403 
17404 __extension__ extern __inline float16x8_t
17405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17406 __arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
17407 {
17408   return __builtin_mve_vabdq_fv8hf (__a, __b);
17409 }
17410 
17411 __extension__ extern __inline mve_pred16_t
17412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17413 __arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b)
17414 {
17415   return __builtin_mve_vcmpneq_n_fv4sf (__a, __b);
17416 }
17417 
17418 __extension__ extern __inline mve_pred16_t
17419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17420 __arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b)
17421 {
17422   return __builtin_mve_vcmpneq_fv4sf (__a, __b);
17423 }
17424 
17425 __extension__ extern __inline mve_pred16_t
17426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17427 __arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b)
17428 {
17429   return __builtin_mve_vcmpltq_n_fv4sf (__a, __b);
17430 }
17431 
17432 __extension__ extern __inline mve_pred16_t
17433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17434 __arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b)
17435 {
17436   return __builtin_mve_vcmpltq_fv4sf (__a, __b);
17437 }
17438 
17439 __extension__ extern __inline mve_pred16_t
17440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17441 __arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b)
17442 {
17443   return __builtin_mve_vcmpleq_n_fv4sf (__a, __b);
17444 }
17445 
17446 __extension__ extern __inline mve_pred16_t
17447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17448 __arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b)
17449 {
17450   return __builtin_mve_vcmpleq_fv4sf (__a, __b);
17451 }
17452 
17453 __extension__ extern __inline mve_pred16_t
17454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17455 __arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b)
17456 {
17457   return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b);
17458 }
17459 
17460 __extension__ extern __inline mve_pred16_t
17461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17462 __arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b)
17463 {
17464   return __builtin_mve_vcmpgtq_fv4sf (__a, __b);
17465 }
17466 
17467 __extension__ extern __inline mve_pred16_t
17468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17469 __arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b)
17470 {
17471   return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b);
17472 }
17473 
17474 __extension__ extern __inline mve_pred16_t
17475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17476 __arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b)
17477 {
17478   return __builtin_mve_vcmpgeq_fv4sf (__a, __b);
17479 }
17480 
17481 __extension__ extern __inline mve_pred16_t
17482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17483 __arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b)
17484 {
17485   return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b);
17486 }
17487 
17488 __extension__ extern __inline mve_pred16_t
17489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17490 __arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
17491 {
17492   return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
17493 }
17494 
17495 __extension__ extern __inline float32x4_t
17496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17497 __arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
17498 {
17499   return __builtin_mve_vsubq_fv4sf (__a, __b);
17500 }
17501 
17502 __extension__ extern __inline float32x4_t
17503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17504 __arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
17505 {
17506   return __builtin_mve_vorrq_fv4sf (__a, __b);
17507 }
17508 
17509 __extension__ extern __inline float32x4_t
17510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17511 __arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
17512 {
17513   return __builtin_mve_vornq_fv4sf (__a, __b);
17514 }
17515 
17516 __extension__ extern __inline float32x4_t
17517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17518 __arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
17519 {
17520   return __builtin_mve_vmulq_n_fv4sf (__a, __b);
17521 }
17522 
17523 __extension__ extern __inline float32x4_t
17524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17525 __arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
17526 {
17527   return __builtin_mve_vmulq_fv4sf (__a, __b);
17528 }
17529 
17530 __extension__ extern __inline float32_t
17531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17532 __arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
17533 {
17534   return __builtin_mve_vminnmvq_fv4sf (__a, __b);
17535 }
17536 
17537 __extension__ extern __inline float32x4_t
17538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17539 __arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b)
17540 {
17541   return __builtin_mve_vminnmq_fv4sf (__a, __b);
17542 }
17543 
17544 __extension__ extern __inline float32_t
17545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17546 __arm_vminnmavq_f32 (float32_t __a, float32x4_t __b)
17547 {
17548   return __builtin_mve_vminnmavq_fv4sf (__a, __b);
17549 }
17550 
17551 __extension__ extern __inline float32x4_t
17552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17553 __arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b)
17554 {
17555   return __builtin_mve_vminnmaq_fv4sf (__a, __b);
17556 }
17557 
17558 __extension__ extern __inline float32_t
17559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17560 __arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b)
17561 {
17562   return __builtin_mve_vmaxnmvq_fv4sf (__a, __b);
17563 }
17564 
17565 __extension__ extern __inline float32x4_t
17566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17567 __arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
17568 {
17569   return __builtin_mve_vmaxnmq_fv4sf (__a, __b);
17570 }
17571 
17572 __extension__ extern __inline float32_t
17573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17574 __arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b)
17575 {
17576   return __builtin_mve_vmaxnmavq_fv4sf (__a, __b);
17577 }
17578 
17579 __extension__ extern __inline float32x4_t
17580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17581 __arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
17582 {
17583   return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
17584 }
17585 
17586 __extension__ extern __inline float32x4_t
17587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17588 __arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
17589 {
17590   return __builtin_mve_veorq_fv4sf (__a, __b);
17591 }
17592 
17593 __extension__ extern __inline float32x4_t
17594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17595 __arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
17596 {
17597   return __builtin_mve_vcmulq_rot90_fv4sf (__a, __b);
17598 }
17599 
17600 __extension__ extern __inline float32x4_t
17601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17602 __arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
17603 {
17604   return __builtin_mve_vcmulq_rot270_fv4sf (__a, __b);
17605 }
17606 
17607 __extension__ extern __inline float32x4_t
17608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17609 __arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
17610 {
17611   return __builtin_mve_vcmulq_rot180_fv4sf (__a, __b);
17612 }
17613 
17614 __extension__ extern __inline float32x4_t
17615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17616 __arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
17617 {
17618   return __builtin_mve_vcmulq_fv4sf (__a, __b);
17619 }
17620 
17621 __extension__ extern __inline float32x4_t
17622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17623 __arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
17624 {
17625   return __builtin_mve_vcaddq_rot90_fv4sf (__a, __b);
17626 }
17627 
17628 __extension__ extern __inline float32x4_t
17629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17630 __arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
17631 {
17632   return __builtin_mve_vcaddq_rot270_fv4sf (__a, __b);
17633 }
17634 
17635 __extension__ extern __inline float32x4_t
17636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17637 __arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
17638 {
17639   return __builtin_mve_vbicq_fv4sf (__a, __b);
17640 }
17641 
17642 __extension__ extern __inline float32x4_t
17643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17644 __arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
17645 {
17646   return __builtin_mve_vandq_fv4sf (__a, __b);
17647 }
17648 
17649 __extension__ extern __inline float32x4_t
17650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17651 __arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
17652 {
17653   return __builtin_mve_vaddq_n_fv4sf (__a, __b);
17654 }
17655 
17656 __extension__ extern __inline float32x4_t
17657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17658 __arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
17659 {
17660   return __builtin_mve_vabdq_fv4sf (__a, __b);
17661 }
17662 
17663 __extension__ extern __inline float16x8_t
17664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17665 __arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b)
17666 {
17667   return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b);
17668 }
17669 
17670 __extension__ extern __inline float16x8_t
17671 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17672 __arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
17673 {
17674   return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
17675 }
17676 
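/* Predicated (_m) operations: only the lanes enabled by __p take part.
   Where an __inactive vector is supplied, its values are returned in the
   disabled lanes.  */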
17677 __extension__ extern __inline mve_pred16_t
17678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17679 __arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
17680 {
17681   return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
17682 }
17683 
17684 __extension__ extern __inline mve_pred16_t
17685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17686 __arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
17687 {
17688   return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
17689 }
17690 
17691 __extension__ extern __inline int16x8_t
17692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17693 __arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
17694 {
17695   return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
17696 }
17697 
17698 __extension__ extern __inline uint16x8_t
17699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17700 __arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
17701 {
17702   return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
17703 }
17704 
17705 __extension__ extern __inline int32x4_t
17706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17707 __arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
17708 {
17709   return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
17710 }
17711 
17712 __extension__ extern __inline uint32x4_t
17713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17714 __arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
17715 {
17716   return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
17717 }
17718 
17719 __extension__ extern __inline float16x8_t
17720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17721 __arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
17722 {
17723   return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
17724 }
17725 
17726 __extension__ extern __inline float16x8_t
17727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17728 __arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
17729 {
17730   return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
17731 }
17732 
17733 __extension__ extern __inline float32x4_t
17734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17735 __arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
17736 {
17737   return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
17738 }
17739 
17740 __extension__ extern __inline float32x4_t
17741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17742 __arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
17743 {
17744   return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
17745 }
17746 
17747 
17748 __extension__ extern __inline float16x8_t
17749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17750 __arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
17751 {
17752   return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
17753 }
17754 
17755 __extension__ extern __inline float32x4_t
17756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17757 __arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
17758 {
17759   return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
17760 }
17761 
17762 __extension__ extern __inline float16x8_t
17763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17764 __arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
17765 {
17766   return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
17767 }
17768 
17769 __extension__ extern __inline float32x4_t
17770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17771 __arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
17772 {
17773   return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
17774 }
17775 
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot180_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot270_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot90_fv8hf (__a, __b, __c);
}

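/* Usage sketch (not part of the original header): vcmlaq and its rotated
   forms accumulate a complex multiply with the second source rotated by 0,
   90, 180 or 270 degrees; a full complex multiply-accumulate is typically
   built from the 0- and 90-degree variants.  Names below are hypothetical.

     acc = __arm_vcmlaq_f16 (acc, a, b);
     acc = __arm_vcmlaq_rot90_f16 (acc, a, b);      // acc += a * b (complex)
*/
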
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
}

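/* Usage sketch (not part of the original header): roughly, vfmaq_f16 fuses
   the product of its last two operands into the first (a + b * c), the _n
   forms broadcast the scalar operand, and vfmsq_f16 subtracts the product
   instead (see the ACLE specification for the exact semantics).  A
   hypothetical a = a + x * y step, with y a float16_t scalar:

     a = __arm_vfmaq_n_f16 (a, x, y);
*/
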
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot180_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot270_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot90_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmaq_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmsq_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p);
}

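/* Usage sketch (not part of the original header): the _n conversions above
   take a compile-time fractional-bit count __imm6 in addition to the
   predicate, so fixed-point data can be widened to float under a predicate.
   Hypothetical example for Q15 samples held in an int16x8_t:

     float16x8_t f = __arm_vcvtq_m_n_f16_s16 (inactive, q15, 15, p);
*/
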
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p);
}

18805 __extension__ extern __inline float32x4_t
18806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_f32(float32x4_t __inactive,float32x4_t __a,float32x4_t __b,mve_pred16_t __p)18807 __arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
18808 {
18809   return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p);
18810 }
18811 
18812 __extension__ extern __inline float16x8_t
18813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_f16(float16x8_t __inactive,float16x8_t __a,float16x8_t __b,mve_pred16_t __p)18814 __arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
18815 {
18816   return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p);
18817 }
18818 
18819 __extension__ extern __inline float32x4_t
18820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18821 __arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
18822 {
18823   return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p);
18824 }
18825 
18826 __extension__ extern __inline float16x8_t
18827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18828 __arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
18829 {
18830   return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p);
18831 }
18832 
18833 __extension__ extern __inline float32x4_t
18834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18835 __arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
18836 {
18837   return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p);
18838 }
18839 
18840 __extension__ extern __inline float16x8_t
18841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18842 __arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
18843 {
18844   return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p);
18845 }
18846 
18847 __extension__ extern __inline float32x4_t
18848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18849 __arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
18850 {
18851   return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p);
18852 }
18853 
18854 __extension__ extern __inline float16x8_t
18855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18856 __arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
18857 {
18858   return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p);
18859 }
18860 
18861 __extension__ extern __inline float32x4_t
18862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18863 __arm_vld1q_f32 (float32_t const * __base)
18864 {
18865   return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base);
18866 }
18867 
18868 __extension__ extern __inline float16x8_t
18869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18870 __arm_vld1q_f16 (float16_t const * __base)
18871 {
18872   return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base);
18873 }
18874 
18875 __extension__ extern __inline float32x4_t
18876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18877 __arm_vldrwq_f32 (float32_t const * __base)
18878 {
18879   return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base);
18880 }
18881 
18882 __extension__ extern __inline float32x4_t
18883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18884 __arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p)
18885 {
18886   return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p);
18887 }
18888 
18889 __extension__ extern __inline float16x8_t
18890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18891 __arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p)
18892 {
18893   return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p);
18894 }
18895 
18896 __extension__ extern __inline float16x8_t
18897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18898 __arm_vldrhq_f16 (float16_t const * __base)
18899 {
18900   return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base);
18901 }
18902 
18903 __extension__ extern __inline float16x8_t
18904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18905 __arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset)
18906 {
18907   return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset);
18908 }
18909 
18910 __extension__ extern __inline float16x8_t
18911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18912 __arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
18913 {
18914   return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p);
18915 }
18916 
18917 __extension__ extern __inline float16x8_t
18918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18919 __arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset)
18920 {
18921   return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset);
18922 }
18923 
18924 __extension__ extern __inline float16x8_t
18925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18926 __arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
18927 {
18928   return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf ((__builtin_neon_hi *) __base, __offset, __p);
18929 }
18930 
18931 __extension__ extern __inline float32x4_t
18932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18933 __arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset)
18934 {
18935   return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset);
18936 }
18937 
18938 __extension__ extern __inline float32x4_t
18939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18940 __arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
18941 {
18942   return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p);
18943 }
18944 
18945 __extension__ extern __inline float32x4_t
18946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18947 __arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset)
18948 {
18949   return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset);
18950 }
18951 
18952 __extension__ extern __inline float32x4_t
18953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18954 __arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
18955 {
18956   return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p);
18957 }
18958 
18959 __extension__ extern __inline float32x4_t
18960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18961 __arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset)
18962 {
18963   return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset);
18964 }
18965 
18966 __extension__ extern __inline float32x4_t
18967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18968 __arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
18969 {
18970   return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf ((__builtin_neon_si *) __base, __offset, __p);
18971 }
18972 
18973 __extension__ extern __inline void
18974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18975 __arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
18976 {
18977   __builtin_mve_vstrwq_p_fv4sf ((__builtin_neon_si *) __addr, __value, __p);
18978 }
18979 
18980 __extension__ extern __inline void
18981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18982 __arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value)
18983 {
18984   __builtin_mve_vstrwq_fv4sf ((__builtin_neon_si *) __addr, __value);
18985 }
18986 
18987 __extension__ extern __inline void
18988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18989 __arm_vst1q_f32 (float32_t * __addr, float32x4_t __value)
18990 {
18991   __builtin_mve_vst1q_fv4sf ((__builtin_neon_si *) __addr, __value);
18992 }
18993 
18994 __extension__ extern __inline void
18995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18996 __arm_vst1q_f16 (float16_t * __addr, float16x8_t __value)
18997 {
18998   __builtin_mve_vst1q_fv8hf ((__builtin_neon_hi *) __addr, __value);
18999 }
19000 
19001 __extension__ extern __inline void
19002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19003 __arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value)
19004 {
19005   __builtin_mve_vstrhq_fv8hf ((__builtin_neon_hi *) __addr, __value);
19006 }
19007 
19008 __extension__ extern __inline void
19009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19010 __arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
19011 {
19012   __builtin_mve_vstrhq_p_fv8hf ((__builtin_neon_hi *) __addr, __value, __p);
19013 }
19014 
19015 __extension__ extern __inline void
19016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19017 __arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
19018 {
19019   __builtin_mve_vstrhq_scatter_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
19020 }
19021 
19022 __extension__ extern __inline void
19023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19024 __arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
19025 {
19026   __builtin_mve_vstrhq_scatter_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
19027 }
19028 
19029 __extension__ extern __inline void
19030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19031 __arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
19032 {
19033   __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
19034 }
19035 
19036 __extension__ extern __inline void
19037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19038 __arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
19039 {
19040   __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
19041 }
19042 
19043 __extension__ extern __inline void
19044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19045 __arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value)
19046 {
19047   __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value);
19048 }
19049 
19050 __extension__ extern __inline void
19051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19052 __arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
19053 {
19054   __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p);
19055 }
19056 
19057 __extension__ extern __inline void
19058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19059 __arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
19060 {
19061   __builtin_mve_vstrwq_scatter_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
19062 }
19063 
19064 __extension__ extern __inline void
19065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19066 __arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
19067 {
19068   __builtin_mve_vstrwq_scatter_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
19069 }
19070 
19071 __extension__ extern __inline void
19072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19073 __arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
19074 {
19075   __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
19076 }
19077 
19078 __extension__ extern __inline void
19079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19080 __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
19081 {
19082   __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
19083 }
19084 
19085 __extension__ extern __inline float16x8_t
19086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19087 __arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
19088 {
19089   return __a + __b;
19090 }
19091 
19092 __extension__ extern __inline float32x4_t
19093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19094 __arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
19095 {
19096   return __a + __b;
19097 }
19098 
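/* Gather loads with base write-back: the "nowb" builtin produces the loaded
   data, while the "wb" builtin produces the updated base vector, which is
   written back through __addr.  */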
19099 __extension__ extern __inline float32x4_t
19100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19101 __arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
19102 {
19103   float32x4_t
19104   result = __builtin_mve_vldrwq_gather_base_nowb_fv4sf (*__addr, __offset);
19105   *__addr = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset);
19106   return result;
19107 }
19108 
19109 __extension__ extern __inline float32x4_t
19110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19111 __arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
19112 {
19113   float32x4_t
19114   result = __builtin_mve_vldrwq_gather_base_nowb_z_fv4sf (*__addr, __offset, __p);
19115   *__addr = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p);
19116   return result;
19117 }
19118 
19119 __extension__ extern __inline void
19120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19121 __arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value)
19122 {
19123   *__addr = __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value);
19124 }
19125 
19126 __extension__ extern __inline void
19127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19128 __arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
19129 {
19130   *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p);
19131 }
19132 
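/* Predicated "_x" variants: these wrappers reuse the "_m" builtins with an
   uninitialized inactive vector, so result lanes corresponding to false
   predicate bits are not defined by these functions.  */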
19133 __extension__ extern __inline float16x8_t
19134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19135 __arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p)
19136 {
19137   return __builtin_mve_vdupq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19138 }
19139 
19140 __extension__ extern __inline float32x4_t
19141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19142 __arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p)
19143 {
19144   return __builtin_mve_vdupq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19145 }
19146 
19147 __extension__ extern __inline float16x8_t
19148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19149 __arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19150 {
19151   return __builtin_mve_vminnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19152 }
19153 
19154 __extension__ extern __inline float32x4_t
19155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19156 __arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19157 {
19158   return __builtin_mve_vminnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19159 }
19160 
19161 __extension__ extern __inline float16x8_t
19162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19163 __arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19164 {
19165   return __builtin_mve_vmaxnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19166 }
19167 
19168 __extension__ extern __inline float32x4_t
19169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19170 __arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19171 {
19172   return __builtin_mve_vmaxnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19173 }
19174 
19175 __extension__ extern __inline float16x8_t
19176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19177 __arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19178 {
19179   return __builtin_mve_vabdq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19180 }
19181 
19182 __extension__ extern __inline float32x4_t
19183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19184 __arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19185 {
19186   return __builtin_mve_vabdq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19187 }
19188 
19189 __extension__ extern __inline float16x8_t
19190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19191 __arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19192 {
19193   return __builtin_mve_vabsq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19194 }
19195 
19196 __extension__ extern __inline float32x4_t
19197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19198 __arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19199 {
19200   return __builtin_mve_vabsq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19201 }
19202 
19203 __extension__ extern __inline float16x8_t
19204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19205 __arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19206 {
19207   return __builtin_mve_vaddq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19208 }
19209 
19210 __extension__ extern __inline float32x4_t
19211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19212 __arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19213 {
19214   return __builtin_mve_vaddq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19215 }
19216 
19217 __extension__ extern __inline float16x8_t
19218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19219 __arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
19220 {
19221   return __builtin_mve_vaddq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19222 }
19223 
19224 __extension__ extern __inline float32x4_t
19225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19226 __arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
19227 {
19228   return __builtin_mve_vaddq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19229 }
19230 
19231 __extension__ extern __inline float16x8_t
19232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19233 __arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19234 {
19235   return __builtin_mve_vnegq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19236 }
19237 
19238 __extension__ extern __inline float32x4_t
19239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19240 __arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19241 {
19242   return __builtin_mve_vnegq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19243 }
19244 
19245 __extension__ extern __inline float16x8_t
19246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19247 __arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19248 {
19249   return __builtin_mve_vmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19250 }
19251 
19252 __extension__ extern __inline float32x4_t
19253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19254 __arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19255 {
19256   return __builtin_mve_vmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19257 }
19258 
19259 __extension__ extern __inline float16x8_t
19260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19261 __arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
19262 {
19263   return __builtin_mve_vmulq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19264 }
19265 
19266 __extension__ extern __inline float32x4_t
19267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19268 __arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
19269 {
19270   return __builtin_mve_vmulq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19271 }
19272 
19273 __extension__ extern __inline float16x8_t
19274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19275 __arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19276 {
19277   return __builtin_mve_vsubq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19278 }
19279 
19280 __extension__ extern __inline float32x4_t
19281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19282 __arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19283 {
19284   return __builtin_mve_vsubq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19285 }
19286 
19287 __extension__ extern __inline float16x8_t
19288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19289 __arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
19290 {
19291   return __builtin_mve_vsubq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19292 }
19293 
19294 __extension__ extern __inline float32x4_t
19295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19296 __arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
19297 {
19298   return __builtin_mve_vsubq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19299 }
19300 
19301 __extension__ extern __inline float16x8_t
19302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19303 __arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19304 {
19305   return __builtin_mve_vcaddq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19306 }
19307 
19308 __extension__ extern __inline float32x4_t
19309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19310 __arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19311 {
19312   return __builtin_mve_vcaddq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19313 }
19314 
19315 __extension__ extern __inline float16x8_t
19316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19317 __arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19318 {
19319   return __builtin_mve_vcaddq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19320 }
19321 
19322 __extension__ extern __inline float32x4_t
19323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19324 __arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19325 {
19326   return __builtin_mve_vcaddq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19327 }
19328 
19329 __extension__ extern __inline float16x8_t
19330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19331 __arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19332 {
19333   return __builtin_mve_vcmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19334 }
19335 
19336 __extension__ extern __inline float32x4_t
19337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19338 __arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19339 {
19340   return __builtin_mve_vcmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19341 }
19342 
19343 __extension__ extern __inline float16x8_t
19344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19345 __arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19346 {
19347   return __builtin_mve_vcmulq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19348 }
19349 
19350 __extension__ extern __inline float32x4_t
19351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19352 __arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19353 {
19354   return __builtin_mve_vcmulq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19355 }
19356 
19357 __extension__ extern __inline float16x8_t
19358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19359 __arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19360 {
19361   return __builtin_mve_vcmulq_rot180_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19362 }
19363 
19364 __extension__ extern __inline float32x4_t
19365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19366 __arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19367 {
19368   return __builtin_mve_vcmulq_rot180_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19369 }
19370 
19371 __extension__ extern __inline float16x8_t
19372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19373 __arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19374 {
19375   return __builtin_mve_vcmulq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19376 }
19377 
19378 __extension__ extern __inline float32x4_t
19379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19380 __arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19381 {
19382   return __builtin_mve_vcmulq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19383 }
19384 
19385 __extension__ extern __inline int16x8_t
19386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19387 __arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19388 {
19389   return __builtin_mve_vcvtaq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19390 }
19391 
19392 __extension__ extern __inline int32x4_t
19393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19394 __arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19395 {
19396   return __builtin_mve_vcvtaq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19397 }
19398 
19399 __extension__ extern __inline uint16x8_t
19400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19401 __arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19402 {
19403   return __builtin_mve_vcvtaq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19404 }
19405 
19406 __extension__ extern __inline uint32x4_t
19407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19408 __arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19409 {
19410   return __builtin_mve_vcvtaq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19411 }
19412 
19413 __extension__ extern __inline int16x8_t
19414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19415 __arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19416 {
19417   return __builtin_mve_vcvtnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19418 }
19419 
19420 __extension__ extern __inline int32x4_t
19421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19422 __arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19423 {
19424   return __builtin_mve_vcvtnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19425 }
19426 
19427 __extension__ extern __inline uint16x8_t
19428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19429 __arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19430 {
19431   return __builtin_mve_vcvtnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19432 }
19433 
19434 __extension__ extern __inline uint32x4_t
19435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19436 __arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19437 {
19438   return __builtin_mve_vcvtnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19439 }
19440 
19441 __extension__ extern __inline int16x8_t
19442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19443 __arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19444 {
19445   return __builtin_mve_vcvtpq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19446 }
19447 
19448 __extension__ extern __inline int32x4_t
19449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19450 __arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19451 {
19452   return __builtin_mve_vcvtpq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19453 }
19454 
19455 __extension__ extern __inline uint16x8_t
19456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19457 __arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19458 {
19459   return __builtin_mve_vcvtpq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19460 }
19461 
19462 __extension__ extern __inline uint32x4_t
19463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19464 __arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19465 {
19466   return __builtin_mve_vcvtpq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19467 }
19468 
19469 __extension__ extern __inline int16x8_t
19470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19471 __arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19472 {
19473   return __builtin_mve_vcvtmq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19474 }
19475 
19476 __extension__ extern __inline int32x4_t
19477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19478 __arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19479 {
19480   return __builtin_mve_vcvtmq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19481 }
19482 
19483 __extension__ extern __inline uint16x8_t
19484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19485 __arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19486 {
19487   return __builtin_mve_vcvtmq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19488 }
19489 
19490 __extension__ extern __inline uint32x4_t
19491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19492 __arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19493 {
19494   return __builtin_mve_vcvtmq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19495 }
19496 
19497 __extension__ extern __inline float32x4_t
19498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19499 __arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
19500 {
19501   return __builtin_mve_vcvtbq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
19502 }
19503 
19504 __extension__ extern __inline float32x4_t
19505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19506 __arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
19507 {
19508   return __builtin_mve_vcvttq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
19509 }
19510 
19511 __extension__ extern __inline float16x8_t
19512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19513 __arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
19514 {
19515   return __builtin_mve_vcvtq_m_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19516 }
19517 
19518 __extension__ extern __inline float16x8_t
19519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19520 __arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
19521 {
19522   return __builtin_mve_vcvtq_m_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19523 }
19524 
19525 __extension__ extern __inline float32x4_t
19526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19527 __arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
19528 {
19529   return __builtin_mve_vcvtq_m_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19530 }
19531 
19532 __extension__ extern __inline float32x4_t
19533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19534 __arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
19535 {
19536   return __builtin_mve_vcvtq_m_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19537 }
19538 
19539 __extension__ extern __inline float16x8_t
19540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19541 __arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
19542 {
19543   return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
19544 }
19545 
19546 __extension__ extern __inline float16x8_t
19547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19548 __arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
19549 {
19550   return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
19551 }
19552 
19553 __extension__ extern __inline float32x4_t
19554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19555 __arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
19556 {
19557   return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
19558 }
19559 
19560 __extension__ extern __inline float32x4_t
19561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19562 __arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
19563 {
19564   return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
19565 }
19566 
19567 __extension__ extern __inline int16x8_t
19568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19569 __arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19570 {
19571   return __builtin_mve_vcvtq_m_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19572 }
19573 
19574 __extension__ extern __inline int32x4_t
19575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19576 __arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19577 {
19578   return __builtin_mve_vcvtq_m_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19579 }
19580 
19581 __extension__ extern __inline uint16x8_t
19582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19583 __arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19584 {
19585   return __builtin_mve_vcvtq_m_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19586 }
19587 
19588 __extension__ extern __inline uint32x4_t
19589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19590 __arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19591 {
19592   return __builtin_mve_vcvtq_m_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19593 }
19594 
19595 __extension__ extern __inline int16x8_t
19596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19597 __arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
19598 {
19599   return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm6, __p);
19600 }
19601 
19602 __extension__ extern __inline int32x4_t
19603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19604 __arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
19605 {
19606   return __builtin_mve_vcvtq_m_n_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __imm6, __p);
19607 }
19608 
19609 __extension__ extern __inline uint16x8_t
19610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19611 __arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
19612 {
19613   return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm6, __p);
19614 }
19615 
19616 __extension__ extern __inline uint32x4_t
19617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19618 __arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
19619 {
19620   return __builtin_mve_vcvtq_m_n_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __imm6, __p);
19621 }
19622 
19623 __extension__ extern __inline float16x8_t
19624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19625 __arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19626 {
19627   return __builtin_mve_vrndq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19628 }
19629 
19630 __extension__ extern __inline float32x4_t
19631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19632 __arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19633 {
19634   return __builtin_mve_vrndq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19635 }
19636 
19637 __extension__ extern __inline float16x8_t
19638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19639 __arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19640 {
19641   return __builtin_mve_vrndnq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19642 }
19643 
19644 __extension__ extern __inline float32x4_t
19645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19646 __arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19647 {
19648   return __builtin_mve_vrndnq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19649 }
19650 
19651 __extension__ extern __inline float16x8_t
19652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19653 __arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19654 {
19655   return __builtin_mve_vrndmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19656 }
19657 
19658 __extension__ extern __inline float32x4_t
19659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19660 __arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19661 {
19662   return __builtin_mve_vrndmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19663 }
19664 
19665 __extension__ extern __inline float16x8_t
19666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19667 __arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19668 {
19669   return __builtin_mve_vrndpq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19670 }
19671 
19672 __extension__ extern __inline float32x4_t
19673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19674 __arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19675 {
19676   return __builtin_mve_vrndpq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19677 }
19678 
19679 __extension__ extern __inline float16x8_t
19680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19681 __arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19682 {
19683   return __builtin_mve_vrndaq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19684 }
19685 
19686 __extension__ extern __inline float32x4_t
19687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19688 __arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19689 {
19690   return __builtin_mve_vrndaq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19691 }
19692 
19693 __extension__ extern __inline float16x8_t
19694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19695 __arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19696 {
19697   return __builtin_mve_vrndxq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19698 }
19699 
19700 __extension__ extern __inline float32x4_t
19701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19702 __arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19703 {
19704   return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19705 }
19706 
19707 __extension__ extern __inline float16x8_t
19708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19709 __arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19710 {
19711   return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19712 }
19713 
19714 __extension__ extern __inline float32x4_t
19715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19716 __arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19717 {
19718   return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19719 }
19720 
19721 __extension__ extern __inline float16x8_t
19722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19723 __arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19724 {
19725   return __builtin_mve_vbicq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19726 }
19727 
19728 __extension__ extern __inline float32x4_t
19729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19730 __arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19731 {
19732   return __builtin_mve_vbicq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19733 }
19734 
19735 __extension__ extern __inline float16x8_t
19736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19737 __arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
19738 {
19739   return __builtin_mve_vbrsrq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19740 }
19741 
19742 __extension__ extern __inline float32x4_t
19743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19744 __arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
19745 {
19746   return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19747 }
19748 
19749 __extension__ extern __inline float16x8_t
19750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19751 __arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19752 {
19753   return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19754 }
19755 
19756 __extension__ extern __inline float32x4_t
19757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19758 __arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19759 {
19760   return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19761 }
19762 
19763 __extension__ extern __inline float16x8_t
19764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19765 __arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19766 {
19767   return __builtin_mve_vornq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19768 }
19769 
19770 __extension__ extern __inline float32x4_t
19771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19772 __arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19773 {
19774   return __builtin_mve_vornq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19775 }
19776 
19777 __extension__ extern __inline float16x8_t
19778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19779 __arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19780 {
19781   return __builtin_mve_vorrq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19782 }
19783 
19784 __extension__ extern __inline float32x4_t
19785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19786 __arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19787 {
19788   return __builtin_mve_vorrq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19789 }
19790 
19791 __extension__ extern __inline float16x8_t
19792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19793 __arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
19794 {
19795   return __builtin_mve_vrev32q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19796 }
19797 
19798 __extension__ extern __inline float16x8_t
19799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19800 __arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
19801 {
19802   return __builtin_mve_vrev64q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19803 }
19804 
19805 __extension__ extern __inline float32x4_t
19806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19807 __arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
19808 {
19809   return __builtin_mve_vrev64q_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19810 }
19811 
19812 __extension__ extern __inline float16x8x4_t
19813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19814 __arm_vld4q_f16 (float16_t const * __addr)
19815 {
19816   union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
19817   __rv.__o = __builtin_mve_vld4qv8hf (__addr);
19818   return __rv.__i;
19819 }
19820 
19821 __extension__ extern __inline float16x8x2_t
19822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19823 __arm_vld2q_f16 (float16_t const * __addr)
19824 {
19825   union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
19826   __rv.__o = __builtin_mve_vld2qv8hf (__addr);
19827   return __rv.__i;
19828 }
19829 
19830 __extension__ extern __inline float16x8_t
19831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19832 __arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p)
19833 {
19834   return vldrhq_z_f16 (__base, __p);
19835 }
19836 
19837 __extension__ extern __inline void
19838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19839 __arm_vst2q_f16 (float16_t * __addr, float16x8x2_t __value)
19840 {
19841   union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
19842   __rv.__i = __value;
19843   __builtin_mve_vst2qv8hf (__addr, __rv.__o);
19844 }
19845 
19846 __extension__ extern __inline void
19847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19848 __arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
19849 {
19850   return vstrhq_p_f16 (__addr, __value, __p);
19851 }
19852 
19853 __extension__ extern __inline float32x4x4_t
19854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19855 __arm_vld4q_f32 (float32_t const * __addr)
19856 {
19857   union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
19858   __rv.__o = __builtin_mve_vld4qv4sf (__addr);
19859   return __rv.__i;
19860 }
19861 
19862 __extension__ extern __inline float32x4x2_t
19863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19864 __arm_vld2q_f32 (float32_t const * __addr)
19865 {
19866   union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
19867   __rv.__o = __builtin_mve_vld2qv4sf (__addr);
19868   return __rv.__i;
19869 }
19870 
19871 __extension__ extern __inline float32x4_t
19872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19873 __arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p)
19874 {
19875   return vldrwq_z_f32 (__base, __p);
19876 }
19877 
19878 __extension__ extern __inline void
19879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19880 __arm_vst2q_f32 (float32_t * __addr, float32x4x2_t __value)
19881 {
19882   union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
19883   __rv.__i = __value;
19884   __builtin_mve_vst2qv4sf (__addr, __rv.__o);
19885 }
19886 
19887 __extension__ extern __inline void
19888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19889 __arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
19890 {
19891   return vstrwq_p_f32 (__addr, __value, __p);
19892 }
19893 
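/* Lane accessors below: __ARM_CHECK_LANEQ validates that __idx is a
   legal lane number for the vector, and __ARM_LANEQ selects the
   corresponding vector element to read or write.  */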
19894 __extension__ extern __inline float16x8_t
19895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19896 __arm_vsetq_lane_f16 (float16_t __a, float16x8_t __b, const int __idx)
19897 {
19898   __ARM_CHECK_LANEQ (__b, __idx);
19899   __b[__ARM_LANEQ(__b,__idx)] = __a;
19900   return __b;
19901 }
19902 
19903 __extension__ extern __inline float32x4_t
19904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19905 __arm_vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __idx)
19906 {
19907   __ARM_CHECK_LANEQ (__b, __idx);
19908   __b[__ARM_LANEQ(__b,__idx)] = __a;
19909   return __b;
19910 }
19911 
19912 __extension__ extern __inline float16_t
19913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19914 __arm_vgetq_lane_f16 (float16x8_t __a, const int __idx)
19915 {
19916   __ARM_CHECK_LANEQ (__a, __idx);
19917   return __a[__ARM_LANEQ(__a,__idx)];
19918 }
19919 
19920 __extension__ extern __inline float32_t
19921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19922 __arm_vgetq_lane_f32 (float32x4_t __a, const int __idx)
19923 {
19924   __ARM_CHECK_LANEQ (__a, __idx);
19925   return __a[__ARM_LANEQ(__a,__idx)];
19926 }
19927 #endif
19928 
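/* In C++, the generic intrinsic names are provided as ordinary inline
   function overloads; each overload simply forwards to the
   type-suffixed variant matching its argument types (for example,
   __arm_vst4q on an int8_t pointer calls __arm_vst4q_s8).  */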
19929 #ifdef __cplusplus
19930 __extension__ extern __inline void
19931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19932 __arm_vst4q (int8_t * __addr, int8x16x4_t __value)
19933 {
19934  __arm_vst4q_s8 (__addr, __value);
19935 }
19936 
19937 __extension__ extern __inline void
19938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19939 __arm_vst4q (int16_t * __addr, int16x8x4_t __value)
19940 {
19941  __arm_vst4q_s16 (__addr, __value);
19942 }
19943 
19944 __extension__ extern __inline void
19945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19946 __arm_vst4q (int32_t * __addr, int32x4x4_t __value)
19947 {
19948  __arm_vst4q_s32 (__addr, __value);
19949 }
19950 
19951 __extension__ extern __inline void
19952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19953 __arm_vst4q (uint8_t * __addr, uint8x16x4_t __value)
19954 {
19955  __arm_vst4q_u8 (__addr, __value);
19956 }
19957 
19958 __extension__ extern __inline void
19959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19960 __arm_vst4q (uint16_t * __addr, uint16x8x4_t __value)
19961 {
19962  __arm_vst4q_u16 (__addr, __value);
19963 }
19964 
19965 __extension__ extern __inline void
19966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19967 __arm_vst4q (uint32_t * __addr, uint32x4x4_t __value)
19968 {
19969  __arm_vst4q_u32 (__addr, __value);
19970 }
19971 
19972 __extension__ extern __inline int8x16_t
19973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19974 __arm_vdupq_n (int8_t __a)
19975 {
19976  return __arm_vdupq_n_s8 (__a);
19977 }
19978 
19979 __extension__ extern __inline int16x8_t
19980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19981 __arm_vdupq_n (int16_t __a)
19982 {
19983  return __arm_vdupq_n_s16 (__a);
19984 }
19985 
19986 __extension__ extern __inline int32x4_t
19987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19988 __arm_vdupq_n (int32_t __a)
19989 {
19990  return __arm_vdupq_n_s32 (__a);
19991 }
19992 
19993 __extension__ extern __inline int8x16_t
19994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19995 __arm_vabsq (int8x16_t __a)
19996 {
19997  return __arm_vabsq_s8 (__a);
19998 }
19999 
20000 __extension__ extern __inline int16x8_t
20001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20002 __arm_vabsq (int16x8_t __a)
20003 {
20004  return __arm_vabsq_s16 (__a);
20005 }
20006 
20007 __extension__ extern __inline int32x4_t
20008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20009 __arm_vabsq (int32x4_t __a)
20010 {
20011  return __arm_vabsq_s32 (__a);
20012 }
20013 
20014 __extension__ extern __inline int8x16_t
20015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20016 __arm_vclsq (int8x16_t __a)
20017 {
20018  return __arm_vclsq_s8 (__a);
20019 }
20020 
20021 __extension__ extern __inline int16x8_t
20022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20023 __arm_vclsq (int16x8_t __a)
20024 {
20025  return __arm_vclsq_s16 (__a);
20026 }
20027 
20028 __extension__ extern __inline int32x4_t
20029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20030 __arm_vclsq (int32x4_t __a)
20031 {
20032  return __arm_vclsq_s32 (__a);
20033 }
20034 
20035 __extension__ extern __inline int8x16_t
20036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20037 __arm_vclzq (int8x16_t __a)
20038 {
20039  return __arm_vclzq_s8 (__a);
20040 }
20041 
20042 __extension__ extern __inline int16x8_t
20043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20044 __arm_vclzq (int16x8_t __a)
20045 {
20046  return __arm_vclzq_s16 (__a);
20047 }
20048 
20049 __extension__ extern __inline int32x4_t
20050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20051 __arm_vclzq (int32x4_t __a)
20052 {
20053  return __arm_vclzq_s32 (__a);
20054 }
20055 
20056 __extension__ extern __inline int8x16_t
20057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20058 __arm_vnegq (int8x16_t __a)
20059 {
20060  return __arm_vnegq_s8 (__a);
20061 }
20062 
20063 __extension__ extern __inline int16x8_t
20064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20065 __arm_vnegq (int16x8_t __a)
20066 {
20067  return __arm_vnegq_s16 (__a);
20068 }
20069 
20070 __extension__ extern __inline int32x4_t
20071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20072 __arm_vnegq (int32x4_t __a)
20073 {
20074  return __arm_vnegq_s32 (__a);
20075 }
20076 
20077 __extension__ extern __inline int64_t
20078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20079 __arm_vaddlvq (int32x4_t __a)
20080 {
20081  return __arm_vaddlvq_s32 (__a);
20082 }
20083 
20084 __extension__ extern __inline int32_t
20085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20086 __arm_vaddvq (int8x16_t __a)
20087 {
20088  return __arm_vaddvq_s8 (__a);
20089 }
20090 
20091 __extension__ extern __inline int32_t
20092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20093 __arm_vaddvq (int16x8_t __a)
20094 {
20095  return __arm_vaddvq_s16 (__a);
20096 }
20097 
20098 __extension__ extern __inline int32_t
20099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20100 __arm_vaddvq (int32x4_t __a)
20101 {
20102  return __arm_vaddvq_s32 (__a);
20103 }
20104 
20105 __extension__ extern __inline int16x8_t
20106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20107 __arm_vmovlbq (int8x16_t __a)
20108 {
20109  return __arm_vmovlbq_s8 (__a);
20110 }
20111 
20112 __extension__ extern __inline int32x4_t
20113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20114 __arm_vmovlbq (int16x8_t __a)
20115 {
20116  return __arm_vmovlbq_s16 (__a);
20117 }
20118 
20119 __extension__ extern __inline int16x8_t
20120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20121 __arm_vmovltq (int8x16_t __a)
20122 {
20123  return __arm_vmovltq_s8 (__a);
20124 }
20125 
20126 __extension__ extern __inline int32x4_t
20127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20128 __arm_vmovltq (int16x8_t __a)
20129 {
20130  return __arm_vmovltq_s16 (__a);
20131 }
20132 
20133 __extension__ extern __inline int8x16_t
20134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20135 __arm_vmvnq (int8x16_t __a)
20136 {
20137  return __arm_vmvnq_s8 (__a);
20138 }
20139 
20140 __extension__ extern __inline int16x8_t
20141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20142 __arm_vmvnq (int16x8_t __a)
20143 {
20144  return __arm_vmvnq_s16 (__a);
20145 }
20146 
20147 __extension__ extern __inline int32x4_t
20148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20149 __arm_vmvnq (int32x4_t __a)
20150 {
20151  return __arm_vmvnq_s32 (__a);
20152 }
20153 
20154 __extension__ extern __inline int8x16_t
20155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20156 __arm_vrev16q (int8x16_t __a)
20157 {
20158  return __arm_vrev16q_s8 (__a);
20159 }
20160 
20161 __extension__ extern __inline int8x16_t
20162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20163 __arm_vrev32q (int8x16_t __a)
20164 {
20165  return __arm_vrev32q_s8 (__a);
20166 }
20167 
20168 __extension__ extern __inline int16x8_t
20169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20170 __arm_vrev32q (int16x8_t __a)
20171 {
20172  return __arm_vrev32q_s16 (__a);
20173 }
20174 
20175 __extension__ extern __inline int8x16_t
20176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20177 __arm_vrev64q (int8x16_t __a)
20178 {
20179  return __arm_vrev64q_s8 (__a);
20180 }
20181 
20182 __extension__ extern __inline int16x8_t
20183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20184 __arm_vrev64q (int16x8_t __a)
20185 {
20186  return __arm_vrev64q_s16 (__a);
20187 }
20188 
20189 __extension__ extern __inline int32x4_t
20190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20191 __arm_vrev64q (int32x4_t __a)
20192 {
20193  return __arm_vrev64q_s32 (__a);
20194 }
20195 
20196 __extension__ extern __inline int8x16_t
20197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20198 __arm_vqabsq (int8x16_t __a)
20199 {
20200  return __arm_vqabsq_s8 (__a);
20201 }
20202 
20203 __extension__ extern __inline int16x8_t
20204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20205 __arm_vqabsq (int16x8_t __a)
20206 {
20207  return __arm_vqabsq_s16 (__a);
20208 }
20209 
20210 __extension__ extern __inline int32x4_t
20211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20212 __arm_vqabsq (int32x4_t __a)
20213 {
20214  return __arm_vqabsq_s32 (__a);
20215 }
20216 
20217 __extension__ extern __inline int8x16_t
20218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20219 __arm_vqnegq (int8x16_t __a)
20220 {
20221  return __arm_vqnegq_s8 (__a);
20222 }
20223 
20224 __extension__ extern __inline int16x8_t
20225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20226 __arm_vqnegq (int16x8_t __a)
20227 {
20228  return __arm_vqnegq_s16 (__a);
20229 }
20230 
20231 __extension__ extern __inline int32x4_t
20232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20233 __arm_vqnegq (int32x4_t __a)
20234 {
20235  return __arm_vqnegq_s32 (__a);
20236 }
20237 
20238 __extension__ extern __inline uint8x16_t
20239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20240 __arm_vrev64q (uint8x16_t __a)
20241 {
20242  return __arm_vrev64q_u8 (__a);
20243 }
20244 
20245 __extension__ extern __inline uint16x8_t
20246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20247 __arm_vrev64q (uint16x8_t __a)
20248 {
20249  return __arm_vrev64q_u16 (__a);
20250 }
20251 
20252 __extension__ extern __inline uint32x4_t
20253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20254 __arm_vrev64q (uint32x4_t __a)
20255 {
20256  return __arm_vrev64q_u32 (__a);
20257 }
20258 
20259 __extension__ extern __inline uint8x16_t
20260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20261 __arm_vmvnq (uint8x16_t __a)
20262 {
20263  return __arm_vmvnq_u8 (__a);
20264 }
20265 
20266 __extension__ extern __inline uint16x8_t
20267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20268 __arm_vmvnq (uint16x8_t __a)
20269 {
20270  return __arm_vmvnq_u16 (__a);
20271 }
20272 
20273 __extension__ extern __inline uint32x4_t
20274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20275 __arm_vmvnq (uint32x4_t __a)
20276 {
20277  return __arm_vmvnq_u32 (__a);
20278 }
20279 
20280 __extension__ extern __inline uint8x16_t
20281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20282 __arm_vdupq_n (uint8_t __a)
20283 {
20284  return __arm_vdupq_n_u8 (__a);
20285 }
20286 
20287 __extension__ extern __inline uint16x8_t
20288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20289 __arm_vdupq_n (uint16_t __a)
20290 {
20291  return __arm_vdupq_n_u16 (__a);
20292 }
20293 
20294 __extension__ extern __inline uint32x4_t
20295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20296 __arm_vdupq_n (uint32_t __a)
20297 {
20298  return __arm_vdupq_n_u32 (__a);
20299 }
20300 
20301 __extension__ extern __inline uint8x16_t
20302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20303 __arm_vclzq (uint8x16_t __a)
20304 {
20305  return __arm_vclzq_u8 (__a);
20306 }
20307 
20308 __extension__ extern __inline uint16x8_t
20309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20310 __arm_vclzq (uint16x8_t __a)
20311 {
20312  return __arm_vclzq_u16 (__a);
20313 }
20314 
20315 __extension__ extern __inline uint32x4_t
20316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20317 __arm_vclzq (uint32x4_t __a)
20318 {
20319  return __arm_vclzq_u32 (__a);
20320 }
20321 
20322 __extension__ extern __inline uint32_t
20323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20324 __arm_vaddvq (uint8x16_t __a)
20325 {
20326  return __arm_vaddvq_u8 (__a);
20327 }
20328 
20329 __extension__ extern __inline uint32_t
20330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20331 __arm_vaddvq (uint16x8_t __a)
20332 {
20333  return __arm_vaddvq_u16 (__a);
20334 }
20335 
20336 __extension__ extern __inline uint32_t
20337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20338 __arm_vaddvq (uint32x4_t __a)
20339 {
20340  return __arm_vaddvq_u32 (__a);
20341 }
20342 
20343 __extension__ extern __inline uint8x16_t
20344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20345 __arm_vrev32q (uint8x16_t __a)
20346 {
20347  return __arm_vrev32q_u8 (__a);
20348 }
20349 
20350 __extension__ extern __inline uint16x8_t
20351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20352 __arm_vrev32q (uint16x8_t __a)
20353 {
20354  return __arm_vrev32q_u16 (__a);
20355 }
20356 
20357 __extension__ extern __inline uint16x8_t
20358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20359 __arm_vmovltq (uint8x16_t __a)
20360 {
20361  return __arm_vmovltq_u8 (__a);
20362 }
20363 
20364 __extension__ extern __inline uint32x4_t
20365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20366 __arm_vmovltq (uint16x8_t __a)
20367 {
20368  return __arm_vmovltq_u16 (__a);
20369 }
20370 
20371 __extension__ extern __inline uint16x8_t
20372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20373 __arm_vmovlbq (uint8x16_t __a)
20374 {
20375  return __arm_vmovlbq_u8 (__a);
20376 }
20377 
20378 __extension__ extern __inline uint32x4_t
20379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20380 __arm_vmovlbq (uint16x8_t __a)
20381 {
20382  return __arm_vmovlbq_u16 (__a);
20383 }
20384 
20385 __extension__ extern __inline uint8x16_t
20386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20387 __arm_vrev16q (uint8x16_t __a)
20388 {
20389  return __arm_vrev16q_u8 (__a);
20390 }
20391 
20392 __extension__ extern __inline uint64_t
20393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20394 __arm_vaddlvq (uint32x4_t __a)
20395 {
20396  return __arm_vaddlvq_u32 (__a);
20397 }
20398 
20399 __extension__ extern __inline int8x16_t
20400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20401 __arm_vshrq (int8x16_t __a, const int __imm)
20402 {
20403  return __arm_vshrq_n_s8 (__a, __imm);
20404 }
20405 
20406 __extension__ extern __inline int16x8_t
20407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20408 __arm_vshrq (int16x8_t __a, const int __imm)
20409 {
20410  return __arm_vshrq_n_s16 (__a, __imm);
20411 }
20412 
20413 __extension__ extern __inline int32x4_t
20414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20415 __arm_vshrq (int32x4_t __a, const int __imm)
20416 {
20417  return __arm_vshrq_n_s32 (__a, __imm);
20418 }
20419 
20420 __extension__ extern __inline uint8x16_t
20421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20422 __arm_vshrq (uint8x16_t __a, const int __imm)
20423 {
20424  return __arm_vshrq_n_u8 (__a, __imm);
20425 }
20426 
20427 __extension__ extern __inline uint16x8_t
20428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20429 __arm_vshrq (uint16x8_t __a, const int __imm)
20430 {
20431  return __arm_vshrq_n_u16 (__a, __imm);
20432 }
20433 
20434 __extension__ extern __inline uint32x4_t
20435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20436 __arm_vshrq (uint32x4_t __a, const int __imm)
20437 {
20438  return __arm_vshrq_n_u32 (__a, __imm);
20439 }
20440 
20441 __extension__ extern __inline int64_t
20442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20443 __arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p)
20444 {
20445  return __arm_vaddlvq_p_s32 (__a, __p);
20446 }
20447 
20448 __extension__ extern __inline uint64_t
20449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20450 __arm_vaddlvq_p (uint32x4_t __a, mve_pred16_t __p)
20451 {
20452  return __arm_vaddlvq_p_u32 (__a, __p);
20453 }
20454 
20455 __extension__ extern __inline mve_pred16_t
20456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20457 __arm_vcmpneq (int8x16_t __a, int8x16_t __b)
20458 {
20459  return __arm_vcmpneq_s8 (__a, __b);
20460 }
20461 
20462 __extension__ extern __inline mve_pred16_t
20463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20464 __arm_vcmpneq (int16x8_t __a, int16x8_t __b)
20465 {
20466  return __arm_vcmpneq_s16 (__a, __b);
20467 }
20468 
20469 __extension__ extern __inline mve_pred16_t
20470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20471 __arm_vcmpneq (int32x4_t __a, int32x4_t __b)
20472 {
20473  return __arm_vcmpneq_s32 (__a, __b);
20474 }
20475 
20476 __extension__ extern __inline mve_pred16_t
20477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20478 __arm_vcmpneq (uint8x16_t __a, uint8x16_t __b)
20479 {
20480  return __arm_vcmpneq_u8 (__a, __b);
20481 }
20482 
20483 __extension__ extern __inline mve_pred16_t
20484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20485 __arm_vcmpneq (uint16x8_t __a, uint16x8_t __b)
20486 {
20487  return __arm_vcmpneq_u16 (__a, __b);
20488 }
20489 
20490 __extension__ extern __inline mve_pred16_t
20491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20492 __arm_vcmpneq (uint32x4_t __a, uint32x4_t __b)
20493 {
20494  return __arm_vcmpneq_u32 (__a, __b);
20495 }
20496 
20497 __extension__ extern __inline int8x16_t
20498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20499 __arm_vshlq (int8x16_t __a, int8x16_t __b)
20500 {
20501  return __arm_vshlq_s8 (__a, __b);
20502 }
20503 
20504 __extension__ extern __inline int16x8_t
20505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20506 __arm_vshlq (int16x8_t __a, int16x8_t __b)
20507 {
20508  return __arm_vshlq_s16 (__a, __b);
20509 }
20510 
20511 __extension__ extern __inline int32x4_t
20512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20513 __arm_vshlq (int32x4_t __a, int32x4_t __b)
20514 {
20515  return __arm_vshlq_s32 (__a, __b);
20516 }
20517 
20518 __extension__ extern __inline uint8x16_t
20519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20520 __arm_vshlq (uint8x16_t __a, int8x16_t __b)
20521 {
20522  return __arm_vshlq_u8 (__a, __b);
20523 }
20524 
20525 __extension__ extern __inline uint16x8_t
20526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20527 __arm_vshlq (uint16x8_t __a, int16x8_t __b)
20528 {
20529  return __arm_vshlq_u16 (__a, __b);
20530 }
20531 
20532 __extension__ extern __inline uint32x4_t
20533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20534 __arm_vshlq (uint32x4_t __a, int32x4_t __b)
20535 {
20536  return __arm_vshlq_u32 (__a, __b);
20537 }
20538 
20539 __extension__ extern __inline uint8x16_t
20540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20541 __arm_vsubq (uint8x16_t __a, uint8x16_t __b)
20542 {
20543  return __arm_vsubq_u8 (__a, __b);
20544 }
20545 
20546 __extension__ extern __inline uint8x16_t
20547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20548 __arm_vsubq (uint8x16_t __a, uint8_t __b)
20549 {
20550  return __arm_vsubq_n_u8 (__a, __b);
20551 }
20552 
20553 __extension__ extern __inline uint8x16_t
20554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20555 __arm_vrmulhq (uint8x16_t __a, uint8x16_t __b)
20556 {
20557  return __arm_vrmulhq_u8 (__a, __b);
20558 }
20559 
20560 __extension__ extern __inline uint8x16_t
20561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20562 __arm_vrhaddq (uint8x16_t __a, uint8x16_t __b)
20563 {
20564  return __arm_vrhaddq_u8 (__a, __b);
20565 }
20566 
20567 __extension__ extern __inline uint8x16_t
20568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20569 __arm_vqsubq (uint8x16_t __a, uint8x16_t __b)
20570 {
20571  return __arm_vqsubq_u8 (__a, __b);
20572 }
20573 
20574 __extension__ extern __inline uint8x16_t
20575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20576 __arm_vqsubq (uint8x16_t __a, uint8_t __b)
20577 {
20578  return __arm_vqsubq_n_u8 (__a, __b);
20579 }
20580 
20581 __extension__ extern __inline uint8x16_t
20582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20583 __arm_vqaddq (uint8x16_t __a, uint8x16_t __b)
20584 {
20585  return __arm_vqaddq_u8 (__a, __b);
20586 }
20587 
20588 __extension__ extern __inline uint8x16_t
20589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20590 __arm_vqaddq (uint8x16_t __a, uint8_t __b)
20591 {
20592  return __arm_vqaddq_n_u8 (__a, __b);
20593 }
20594 
20595 __extension__ extern __inline uint8x16_t
20596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20597 __arm_vorrq (uint8x16_t __a, uint8x16_t __b)
20598 {
20599  return __arm_vorrq_u8 (__a, __b);
20600 }
20601 
20602 __extension__ extern __inline uint8x16_t
20603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20604 __arm_vornq (uint8x16_t __a, uint8x16_t __b)
20605 {
20606  return __arm_vornq_u8 (__a, __b);
20607 }
20608 
20609 __extension__ extern __inline uint8x16_t
20610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20611 __arm_vmulq (uint8x16_t __a, uint8x16_t __b)
20612 {
20613  return __arm_vmulq_u8 (__a, __b);
20614 }
20615 
20616 __extension__ extern __inline uint8x16_t
20617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20618 __arm_vmulq (uint8x16_t __a, uint8_t __b)
20619 {
20620  return __arm_vmulq_n_u8 (__a, __b);
20621 }
20622 
20623 __extension__ extern __inline uint16x8_t
20624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20625 __arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b)
20626 {
20627  return __arm_vmulltq_int_u8 (__a, __b);
20628 }
20629 
20630 __extension__ extern __inline uint16x8_t
20631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20632 __arm_vmullbq_int (uint8x16_t __a, uint8x16_t __b)
20633 {
20634  return __arm_vmullbq_int_u8 (__a, __b);
20635 }
20636 
20637 __extension__ extern __inline uint8x16_t
20638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20639 __arm_vmulhq (uint8x16_t __a, uint8x16_t __b)
20640 {
20641  return __arm_vmulhq_u8 (__a, __b);
20642 }
20643 
20644 __extension__ extern __inline uint32_t
20645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20646 __arm_vmladavq (uint8x16_t __a, uint8x16_t __b)
20647 {
20648  return __arm_vmladavq_u8 (__a, __b);
20649 }
20650 
20651 __extension__ extern __inline uint8_t
20652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20653 __arm_vminvq (uint8_t __a, uint8x16_t __b)
20654 {
20655  return __arm_vminvq_u8 (__a, __b);
20656 }
20657 
20658 __extension__ extern __inline uint8x16_t
20659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20660 __arm_vminq (uint8x16_t __a, uint8x16_t __b)
20661 {
20662  return __arm_vminq_u8 (__a, __b);
20663 }
20664 
20665 __extension__ extern __inline uint8_t
20666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20667 __arm_vmaxvq (uint8_t __a, uint8x16_t __b)
20668 {
20669  return __arm_vmaxvq_u8 (__a, __b);
20670 }
20671 
20672 __extension__ extern __inline uint8x16_t
20673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20674 __arm_vmaxq (uint8x16_t __a, uint8x16_t __b)
20675 {
20676  return __arm_vmaxq_u8 (__a, __b);
20677 }
20678 
20679 __extension__ extern __inline uint8x16_t
20680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20681 __arm_vhsubq (uint8x16_t __a, uint8x16_t __b)
20682 {
20683  return __arm_vhsubq_u8 (__a, __b);
20684 }
20685 
20686 __extension__ extern __inline uint8x16_t
20687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20688 __arm_vhsubq (uint8x16_t __a, uint8_t __b)
20689 {
20690  return __arm_vhsubq_n_u8 (__a, __b);
20691 }
20692 
20693 __extension__ extern __inline uint8x16_t
20694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20695 __arm_vhaddq (uint8x16_t __a, uint8x16_t __b)
20696 {
20697  return __arm_vhaddq_u8 (__a, __b);
20698 }
20699 
20700 __extension__ extern __inline uint8x16_t
20701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20702 __arm_vhaddq (uint8x16_t __a, uint8_t __b)
20703 {
20704  return __arm_vhaddq_n_u8 (__a, __b);
20705 }
20706 
20707 __extension__ extern __inline uint8x16_t
20708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20709 __arm_veorq (uint8x16_t __a, uint8x16_t __b)
20710 {
20711  return __arm_veorq_u8 (__a, __b);
20712 }
20713 
20714 __extension__ extern __inline mve_pred16_t
20715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20716 __arm_vcmpneq (uint8x16_t __a, uint8_t __b)
20717 {
20718  return __arm_vcmpneq_n_u8 (__a, __b);
20719 }
20720 
20721 __extension__ extern __inline mve_pred16_t
20722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20723 __arm_vcmphiq (uint8x16_t __a, uint8x16_t __b)
20724 {
20725  return __arm_vcmphiq_u8 (__a, __b);
20726 }
20727 
20728 __extension__ extern __inline mve_pred16_t
20729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20730 __arm_vcmphiq (uint8x16_t __a, uint8_t __b)
20731 {
20732  return __arm_vcmphiq_n_u8 (__a, __b);
20733 }
20734 
20735 __extension__ extern __inline mve_pred16_t
20736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20737 __arm_vcmpeqq (uint8x16_t __a, uint8x16_t __b)
20738 {
20739  return __arm_vcmpeqq_u8 (__a, __b);
20740 }
20741 
20742 __extension__ extern __inline mve_pred16_t
20743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20744 __arm_vcmpeqq (uint8x16_t __a, uint8_t __b)
20745 {
20746  return __arm_vcmpeqq_n_u8 (__a, __b);
20747 }
20748 
20749 __extension__ extern __inline mve_pred16_t
20750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20751 __arm_vcmpcsq (uint8x16_t __a, uint8x16_t __b)
20752 {
20753  return __arm_vcmpcsq_u8 (__a, __b);
20754 }
20755 
20756 __extension__ extern __inline mve_pred16_t
20757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20758 __arm_vcmpcsq (uint8x16_t __a, uint8_t __b)
20759 {
20760  return __arm_vcmpcsq_n_u8 (__a, __b);
20761 }
20762 
20763 __extension__ extern __inline uint8x16_t
20764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20765 __arm_vcaddq_rot90 (uint8x16_t __a, uint8x16_t __b)
20766 {
20767  return __arm_vcaddq_rot90_u8 (__a, __b);
20768 }
20769 
20770 __extension__ extern __inline uint8x16_t
20771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20772 __arm_vcaddq_rot270 (uint8x16_t __a, uint8x16_t __b)
20773 {
20774  return __arm_vcaddq_rot270_u8 (__a, __b);
20775 }
20776 
20777 __extension__ extern __inline uint8x16_t
20778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20779 __arm_vbicq (uint8x16_t __a, uint8x16_t __b)
20780 {
20781  return __arm_vbicq_u8 (__a, __b);
20782 }
20783 
20784 __extension__ extern __inline uint8x16_t
20785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20786 __arm_vandq (uint8x16_t __a, uint8x16_t __b)
20787 {
20788  return __arm_vandq_u8 (__a, __b);
20789 }
20790 
20791 __extension__ extern __inline uint32_t
20792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20793 __arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p)
20794 {
20795  return __arm_vaddvq_p_u8 (__a, __p);
20796 }
20797 
20798 __extension__ extern __inline uint32_t
20799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20800 __arm_vaddvaq (uint32_t __a, uint8x16_t __b)
20801 {
20802  return __arm_vaddvaq_u8 (__a, __b);
20803 }
20804 
20805 __extension__ extern __inline uint8x16_t
20806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20807 __arm_vaddq (uint8x16_t __a, uint8_t __b)
20808 {
20809  return __arm_vaddq_n_u8 (__a, __b);
20810 }
20811 
20812 __extension__ extern __inline uint8x16_t
20813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20814 __arm_vabdq (uint8x16_t __a, uint8x16_t __b)
20815 {
20816  return __arm_vabdq_u8 (__a, __b);
20817 }
20818 
20819 __extension__ extern __inline uint8x16_t
20820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20821 __arm_vshlq_r (uint8x16_t __a, int32_t __b)
20822 {
20823  return __arm_vshlq_r_u8 (__a, __b);
20824 }
20825 
20826 __extension__ extern __inline uint8x16_t
20827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20828 __arm_vrshlq (uint8x16_t __a, int8x16_t __b)
20829 {
20830  return __arm_vrshlq_u8 (__a, __b);
20831 }
20832 
20833 __extension__ extern __inline uint8x16_t
20834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20835 __arm_vrshlq (uint8x16_t __a, int32_t __b)
20836 {
20837  return __arm_vrshlq_n_u8 (__a, __b);
20838 }
20839 
20840 __extension__ extern __inline uint8x16_t
20841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20842 __arm_vqshlq (uint8x16_t __a, int8x16_t __b)
20843 {
20844  return __arm_vqshlq_u8 (__a, __b);
20845 }
20846 
20847 __extension__ extern __inline uint8x16_t
20848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20849 __arm_vqshlq_r (uint8x16_t __a, int32_t __b)
20850 {
20851  return __arm_vqshlq_r_u8 (__a, __b);
20852 }
20853 
20854 __extension__ extern __inline uint8x16_t
20855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20856 __arm_vqrshlq (uint8x16_t __a, int8x16_t __b)
20857 {
20858  return __arm_vqrshlq_u8 (__a, __b);
20859 }
20860 
20861 __extension__ extern __inline uint8x16_t
20862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20863 __arm_vqrshlq (uint8x16_t __a, int32_t __b)
20864 {
20865  return __arm_vqrshlq_n_u8 (__a, __b);
20866 }
20867 
20868 __extension__ extern __inline uint8_t
20869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20870 __arm_vminavq (uint8_t __a, int8x16_t __b)
20871 {
20872  return __arm_vminavq_s8 (__a, __b);
20873 }
20874 
20875 __extension__ extern __inline uint8x16_t
20876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20877 __arm_vminaq (uint8x16_t __a, int8x16_t __b)
20878 {
20879  return __arm_vminaq_s8 (__a, __b);
20880 }
20881 
20882 __extension__ extern __inline uint8_t
20883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20884 __arm_vmaxavq (uint8_t __a, int8x16_t __b)
20885 {
20886  return __arm_vmaxavq_s8 (__a, __b);
20887 }
20888 
20889 __extension__ extern __inline uint8x16_t
20890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20891 __arm_vmaxaq (uint8x16_t __a, int8x16_t __b)
20892 {
20893  return __arm_vmaxaq_s8 (__a, __b);
20894 }
20895 
20896 __extension__ extern __inline uint8x16_t
20897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20898 __arm_vbrsrq (uint8x16_t __a, int32_t __b)
20899 {
20900  return __arm_vbrsrq_n_u8 (__a, __b);
20901 }
20902 
20903 __extension__ extern __inline uint8x16_t
20904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20905 __arm_vshlq_n (uint8x16_t __a, const int __imm)
20906 {
20907  return __arm_vshlq_n_u8 (__a, __imm);
20908 }
20909 
20910 __extension__ extern __inline uint8x16_t
20911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20912 __arm_vrshrq (uint8x16_t __a, const int __imm)
20913 {
20914  return __arm_vrshrq_n_u8 (__a, __imm);
20915 }
20916 
20917 __extension__ extern __inline uint8x16_t
20918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20919 __arm_vqshlq_n (uint8x16_t __a, const int __imm)
20920 {
20921  return __arm_vqshlq_n_u8 (__a, __imm);
20922 }
20923 
20924 __extension__ extern __inline mve_pred16_t
20925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20926 __arm_vcmpneq (int8x16_t __a, int8_t __b)
20927 {
20928  return __arm_vcmpneq_n_s8 (__a, __b);
20929 }
20930 
20931 __extension__ extern __inline mve_pred16_t
20932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20933 __arm_vcmpltq (int8x16_t __a, int8x16_t __b)
20934 {
20935  return __arm_vcmpltq_s8 (__a, __b);
20936 }
20937 
20938 __extension__ extern __inline mve_pred16_t
20939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20940 __arm_vcmpltq (int8x16_t __a, int8_t __b)
20941 {
20942  return __arm_vcmpltq_n_s8 (__a, __b);
20943 }
20944 
20945 __extension__ extern __inline mve_pred16_t
20946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20947 __arm_vcmpleq (int8x16_t __a, int8x16_t __b)
20948 {
20949  return __arm_vcmpleq_s8 (__a, __b);
20950 }
20951 
20952 __extension__ extern __inline mve_pred16_t
20953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20954 __arm_vcmpleq (int8x16_t __a, int8_t __b)
20955 {
20956  return __arm_vcmpleq_n_s8 (__a, __b);
20957 }
20958 
20959 __extension__ extern __inline mve_pred16_t
20960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20961 __arm_vcmpgtq (int8x16_t __a, int8x16_t __b)
20962 {
20963  return __arm_vcmpgtq_s8 (__a, __b);
20964 }
20965 
20966 __extension__ extern __inline mve_pred16_t
20967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20968 __arm_vcmpgtq (int8x16_t __a, int8_t __b)
20969 {
20970  return __arm_vcmpgtq_n_s8 (__a, __b);
20971 }
20972 
20973 __extension__ extern __inline mve_pred16_t
20974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20975 __arm_vcmpgeq (int8x16_t __a, int8x16_t __b)
20976 {
20977  return __arm_vcmpgeq_s8 (__a, __b);
20978 }
20979 
20980 __extension__ extern __inline mve_pred16_t
20981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20982 __arm_vcmpgeq (int8x16_t __a, int8_t __b)
20983 {
20984  return __arm_vcmpgeq_n_s8 (__a, __b);
20985 }
20986 
20987 __extension__ extern __inline mve_pred16_t
20988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20989 __arm_vcmpeqq (int8x16_t __a, int8x16_t __b)
20990 {
20991  return __arm_vcmpeqq_s8 (__a, __b);
20992 }
20993 
20994 __extension__ extern __inline mve_pred16_t
20995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20996 __arm_vcmpeqq (int8x16_t __a, int8_t __b)
20997 {
20998  return __arm_vcmpeqq_n_s8 (__a, __b);
20999 }
21000 
21001 __extension__ extern __inline uint8x16_t
21002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21003 __arm_vqshluq (int8x16_t __a, const int __imm)
21004 {
21005  return __arm_vqshluq_n_s8 (__a, __imm);
21006 }
21007 
21008 __extension__ extern __inline int32_t
21009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21010 __arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p)
21011 {
21012  return __arm_vaddvq_p_s8 (__a, __p);
21013 }
21014 
21015 __extension__ extern __inline int8x16_t
21016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21017 __arm_vsubq (int8x16_t __a, int8x16_t __b)
21018 {
21019  return __arm_vsubq_s8 (__a, __b);
21020 }
21021 
21022 __extension__ extern __inline int8x16_t
21023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21024 __arm_vsubq (int8x16_t __a, int8_t __b)
21025 {
21026  return __arm_vsubq_n_s8 (__a, __b);
21027 }
21028 
21029 __extension__ extern __inline int8x16_t
21030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21031 __arm_vshlq_r (int8x16_t __a, int32_t __b)
21032 {
21033  return __arm_vshlq_r_s8 (__a, __b);
21034 }
21035 
21036 __extension__ extern __inline int8x16_t
21037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21038 __arm_vrshlq (int8x16_t __a, int8x16_t __b)
21039 {
21040  return __arm_vrshlq_s8 (__a, __b);
21041 }
21042 
21043 __extension__ extern __inline int8x16_t
21044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21045 __arm_vrshlq (int8x16_t __a, int32_t __b)
21046 {
21047  return __arm_vrshlq_n_s8 (__a, __b);
21048 }
21049 
21050 __extension__ extern __inline int8x16_t
21051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21052 __arm_vrmulhq (int8x16_t __a, int8x16_t __b)
21053 {
21054  return __arm_vrmulhq_s8 (__a, __b);
21055 }
21056 
21057 __extension__ extern __inline int8x16_t
21058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21059 __arm_vrhaddq (int8x16_t __a, int8x16_t __b)
21060 {
21061  return __arm_vrhaddq_s8 (__a, __b);
21062 }
21063 
21064 __extension__ extern __inline int8x16_t
21065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21066 __arm_vqsubq (int8x16_t __a, int8x16_t __b)
21067 {
21068  return __arm_vqsubq_s8 (__a, __b);
21069 }
21070 
21071 __extension__ extern __inline int8x16_t
21072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21073 __arm_vqsubq (int8x16_t __a, int8_t __b)
21074 {
21075  return __arm_vqsubq_n_s8 (__a, __b);
21076 }
21077 
21078 __extension__ extern __inline int8x16_t
21079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21080 __arm_vqshlq (int8x16_t __a, int8x16_t __b)
21081 {
21082  return __arm_vqshlq_s8 (__a, __b);
21083 }
21084 
21085 __extension__ extern __inline int8x16_t
21086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21087 __arm_vqshlq_r (int8x16_t __a, int32_t __b)
21088 {
21089  return __arm_vqshlq_r_s8 (__a, __b);
21090 }
21091 
21092 __extension__ extern __inline int8x16_t
21093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21094 __arm_vqrshlq (int8x16_t __a, int8x16_t __b)
21095 {
21096  return __arm_vqrshlq_s8 (__a, __b);
21097 }
21098 
21099 __extension__ extern __inline int8x16_t
21100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21101 __arm_vqrshlq (int8x16_t __a, int32_t __b)
21102 {
21103  return __arm_vqrshlq_n_s8 (__a, __b);
21104 }
21105 
21106 __extension__ extern __inline int8x16_t
21107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21108 __arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
21109 {
21110  return __arm_vqrdmulhq_s8 (__a, __b);
21111 }
21112 
21113 __extension__ extern __inline int8x16_t
21114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21115 __arm_vqrdmulhq (int8x16_t __a, int8_t __b)
21116 {
21117  return __arm_vqrdmulhq_n_s8 (__a, __b);
21118 }
21119 
21120 __extension__ extern __inline int8x16_t
21121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21122 __arm_vqdmulhq (int8x16_t __a, int8x16_t __b)
21123 {
21124  return __arm_vqdmulhq_s8 (__a, __b);
21125 }
21126 
21127 __extension__ extern __inline int8x16_t
21128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21129 __arm_vqdmulhq (int8x16_t __a, int8_t __b)
21130 {
21131  return __arm_vqdmulhq_n_s8 (__a, __b);
21132 }
21133 
21134 __extension__ extern __inline int8x16_t
21135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21136 __arm_vqaddq (int8x16_t __a, int8x16_t __b)
21137 {
21138  return __arm_vqaddq_s8 (__a, __b);
21139 }
21140 
21141 __extension__ extern __inline int8x16_t
21142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21143 __arm_vqaddq (int8x16_t __a, int8_t __b)
21144 {
21145  return __arm_vqaddq_n_s8 (__a, __b);
21146 }
21147 
21148 __extension__ extern __inline int8x16_t
21149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21150 __arm_vorrq (int8x16_t __a, int8x16_t __b)
21151 {
21152  return __arm_vorrq_s8 (__a, __b);
21153 }
21154 
21155 __extension__ extern __inline int8x16_t
21156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21157 __arm_vornq (int8x16_t __a, int8x16_t __b)
21158 {
21159  return __arm_vornq_s8 (__a, __b);
21160 }
21161 
21162 __extension__ extern __inline int8x16_t
21163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21164 __arm_vmulq (int8x16_t __a, int8x16_t __b)
21165 {
21166  return __arm_vmulq_s8 (__a, __b);
21167 }
21168 
21169 __extension__ extern __inline int8x16_t
21170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21171 __arm_vmulq (int8x16_t __a, int8_t __b)
21172 {
21173  return __arm_vmulq_n_s8 (__a, __b);
21174 }
21175 
21176 __extension__ extern __inline int16x8_t
21177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21178 __arm_vmulltq_int (int8x16_t __a, int8x16_t __b)
21179 {
21180  return __arm_vmulltq_int_s8 (__a, __b);
21181 }
21182 
21183 __extension__ extern __inline int16x8_t
21184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21185 __arm_vmullbq_int (int8x16_t __a, int8x16_t __b)
21186 {
21187  return __arm_vmullbq_int_s8 (__a, __b);
21188 }
21189 
21190 __extension__ extern __inline int8x16_t
21191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21192 __arm_vmulhq (int8x16_t __a, int8x16_t __b)
21193 {
21194  return __arm_vmulhq_s8 (__a, __b);
21195 }
21196 
21197 __extension__ extern __inline int32_t
21198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21199 __arm_vmlsdavxq (int8x16_t __a, int8x16_t __b)
21200 {
21201  return __arm_vmlsdavxq_s8 (__a, __b);
21202 }
21203 
21204 __extension__ extern __inline int32_t
21205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21206 __arm_vmlsdavq (int8x16_t __a, int8x16_t __b)
21207 {
21208  return __arm_vmlsdavq_s8 (__a, __b);
21209 }
21210 
21211 __extension__ extern __inline int32_t
21212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21213 __arm_vmladavxq (int8x16_t __a, int8x16_t __b)
21214 {
21215  return __arm_vmladavxq_s8 (__a, __b);
21216 }
21217 
21218 __extension__ extern __inline int32_t
21219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21220 __arm_vmladavq (int8x16_t __a, int8x16_t __b)
21221 {
21222  return __arm_vmladavq_s8 (__a, __b);
21223 }
21224 
21225 __extension__ extern __inline int8_t
21226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21227 __arm_vminvq (int8_t __a, int8x16_t __b)
21228 {
21229  return __arm_vminvq_s8 (__a, __b);
21230 }
21231 
21232 __extension__ extern __inline int8x16_t
21233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21234 __arm_vminq (int8x16_t __a, int8x16_t __b)
21235 {
21236  return __arm_vminq_s8 (__a, __b);
21237 }
21238 
21239 __extension__ extern __inline int8_t
21240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21241 __arm_vmaxvq (int8_t __a, int8x16_t __b)
21242 {
21243  return __arm_vmaxvq_s8 (__a, __b);
21244 }
21245 
21246 __extension__ extern __inline int8x16_t
21247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21248 __arm_vmaxq (int8x16_t __a, int8x16_t __b)
21249 {
21250  return __arm_vmaxq_s8 (__a, __b);
21251 }
21252 
21253 __extension__ extern __inline int8x16_t
21254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21255 __arm_vhsubq (int8x16_t __a, int8x16_t __b)
21256 {
21257  return __arm_vhsubq_s8 (__a, __b);
21258 }
21259 
21260 __extension__ extern __inline int8x16_t
21261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21262 __arm_vhsubq (int8x16_t __a, int8_t __b)
21263 {
21264  return __arm_vhsubq_n_s8 (__a, __b);
21265 }
21266 
21267 __extension__ extern __inline int8x16_t
21268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21269 __arm_vhcaddq_rot90 (int8x16_t __a, int8x16_t __b)
21270 {
21271  return __arm_vhcaddq_rot90_s8 (__a, __b);
21272 }
21273 
21274 __extension__ extern __inline int8x16_t
21275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21276 __arm_vhcaddq_rot270 (int8x16_t __a, int8x16_t __b)
21277 {
21278  return __arm_vhcaddq_rot270_s8 (__a, __b);
21279 }
21280 
21281 __extension__ extern __inline int8x16_t
21282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21283 __arm_vhaddq (int8x16_t __a, int8x16_t __b)
21284 {
21285  return __arm_vhaddq_s8 (__a, __b);
21286 }
21287 
21288 __extension__ extern __inline int8x16_t
21289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21290 __arm_vhaddq (int8x16_t __a, int8_t __b)
21291 {
21292  return __arm_vhaddq_n_s8 (__a, __b);
21293 }
21294 
21295 __extension__ extern __inline int8x16_t
21296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21297 __arm_veorq (int8x16_t __a, int8x16_t __b)
21298 {
21299  return __arm_veorq_s8 (__a, __b);
21300 }
21301 
21302 __extension__ extern __inline int8x16_t
21303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21304 __arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
21305 {
21306  return __arm_vcaddq_rot90_s8 (__a, __b);
21307 }
21308 
21309 __extension__ extern __inline int8x16_t
21310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21311 __arm_vcaddq_rot270 (int8x16_t __a, int8x16_t __b)
21312 {
21313  return __arm_vcaddq_rot270_s8 (__a, __b);
21314 }
21315 
21316 __extension__ extern __inline int8x16_t
21317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21318 __arm_vbrsrq (int8x16_t __a, int32_t __b)
21319 {
21320  return __arm_vbrsrq_n_s8 (__a, __b);
21321 }
21322 
21323 __extension__ extern __inline int8x16_t
21324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21325 __arm_vbicq (int8x16_t __a, int8x16_t __b)
21326 {
21327  return __arm_vbicq_s8 (__a, __b);
21328 }
21329 
21330 __extension__ extern __inline int8x16_t
21331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21332 __arm_vandq (int8x16_t __a, int8x16_t __b)
21333 {
21334  return __arm_vandq_s8 (__a, __b);
21335 }
21336 
21337 __extension__ extern __inline int32_t
21338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21339 __arm_vaddvaq (int32_t __a, int8x16_t __b)
21340 {
21341  return __arm_vaddvaq_s8 (__a, __b);
21342 }
21343 
21344 __extension__ extern __inline int8x16_t
21345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21346 __arm_vaddq (int8x16_t __a, int8_t __b)
21347 {
21348  return __arm_vaddq_n_s8 (__a, __b);
21349 }
21350 
21351 __extension__ extern __inline int8x16_t
21352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21353 __arm_vabdq (int8x16_t __a, int8x16_t __b)
21354 {
21355  return __arm_vabdq_s8 (__a, __b);
21356 }
21357 
21358 __extension__ extern __inline int8x16_t
21359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21360 __arm_vshlq_n (int8x16_t __a, const int __imm)
21361 {
21362  return __arm_vshlq_n_s8 (__a, __imm);
21363 }
21364 
21365 __extension__ extern __inline int8x16_t
21366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21367 __arm_vrshrq (int8x16_t __a, const int __imm)
21368 {
21369  return __arm_vrshrq_n_s8 (__a, __imm);
21370 }
21371 
21372 __extension__ extern __inline int8x16_t
21373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21374 __arm_vqshlq_n (int8x16_t __a, const int __imm)
21375 {
21376  return __arm_vqshlq_n_s8 (__a, __imm);
21377 }
21378 
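/* Overloaded wrappers for uint16x8_t operands follow; each one simply
   forwards to the corresponding type-suffixed intrinsic (_u16, or _n_u16
   when the second operand is a scalar).  */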
21379 __extension__ extern __inline uint16x8_t
21380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21381 __arm_vsubq (uint16x8_t __a, uint16x8_t __b)
21382 {
21383  return __arm_vsubq_u16 (__a, __b);
21384 }
21385 
21386 __extension__ extern __inline uint16x8_t
21387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21388 __arm_vsubq (uint16x8_t __a, uint16_t __b)
21389 {
21390  return __arm_vsubq_n_u16 (__a, __b);
21391 }
21392 
21393 __extension__ extern __inline uint16x8_t
21394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21395 __arm_vrmulhq (uint16x8_t __a, uint16x8_t __b)
21396 {
21397  return __arm_vrmulhq_u16 (__a, __b);
21398 }
21399 
21400 __extension__ extern __inline uint16x8_t
21401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21402 __arm_vrhaddq (uint16x8_t __a, uint16x8_t __b)
21403 {
21404  return __arm_vrhaddq_u16 (__a, __b);
21405 }
21406 
21407 __extension__ extern __inline uint16x8_t
21408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21409 __arm_vqsubq (uint16x8_t __a, uint16x8_t __b)
21410 {
21411  return __arm_vqsubq_u16 (__a, __b);
21412 }
21413 
21414 __extension__ extern __inline uint16x8_t
21415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21416 __arm_vqsubq (uint16x8_t __a, uint16_t __b)
21417 {
21418  return __arm_vqsubq_n_u16 (__a, __b);
21419 }
21420 
21421 __extension__ extern __inline uint16x8_t
21422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21423 __arm_vqaddq (uint16x8_t __a, uint16x8_t __b)
21424 {
21425  return __arm_vqaddq_u16 (__a, __b);
21426 }
21427 
21428 __extension__ extern __inline uint16x8_t
21429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21430 __arm_vqaddq (uint16x8_t __a, uint16_t __b)
21431 {
21432  return __arm_vqaddq_n_u16 (__a, __b);
21433 }
21434 
21435 __extension__ extern __inline uint16x8_t
21436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21437 __arm_vorrq (uint16x8_t __a, uint16x8_t __b)
21438 {
21439  return __arm_vorrq_u16 (__a, __b);
21440 }
21441 
21442 __extension__ extern __inline uint16x8_t
21443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21444 __arm_vornq (uint16x8_t __a, uint16x8_t __b)
21445 {
21446  return __arm_vornq_u16 (__a, __b);
21447 }
21448 
21449 __extension__ extern __inline uint16x8_t
21450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21451 __arm_vmulq (uint16x8_t __a, uint16x8_t __b)
21452 {
21453  return __arm_vmulq_u16 (__a, __b);
21454 }
21455 
21456 __extension__ extern __inline uint16x8_t
21457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21458 __arm_vmulq (uint16x8_t __a, uint16_t __b)
21459 {
21460  return __arm_vmulq_n_u16 (__a, __b);
21461 }
21462 
21463 __extension__ extern __inline uint32x4_t
21464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21465 __arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b)
21466 {
21467  return __arm_vmulltq_int_u16 (__a, __b);
21468 }
21469 
21470 __extension__ extern __inline uint32x4_t
21471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21472 __arm_vmullbq_int (uint16x8_t __a, uint16x8_t __b)
21473 {
21474  return __arm_vmullbq_int_u16 (__a, __b);
21475 }
21476 
21477 __extension__ extern __inline uint16x8_t
21478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21479 __arm_vmulhq (uint16x8_t __a, uint16x8_t __b)
21480 {
21481  return __arm_vmulhq_u16 (__a, __b);
21482 }
21483 
21484 __extension__ extern __inline uint32_t
21485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21486 __arm_vmladavq (uint16x8_t __a, uint16x8_t __b)
21487 {
21488  return __arm_vmladavq_u16 (__a, __b);
21489 }
21490 
21491 __extension__ extern __inline uint16_t
21492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21493 __arm_vminvq (uint16_t __a, uint16x8_t __b)
21494 {
21495  return __arm_vminvq_u16 (__a, __b);
21496 }
21497 
21498 __extension__ extern __inline uint16x8_t
21499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21500 __arm_vminq (uint16x8_t __a, uint16x8_t __b)
21501 {
21502  return __arm_vminq_u16 (__a, __b);
21503 }
21504 
21505 __extension__ extern __inline uint16_t
21506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21507 __arm_vmaxvq (uint16_t __a, uint16x8_t __b)
21508 {
21509  return __arm_vmaxvq_u16 (__a, __b);
21510 }
21511 
21512 __extension__ extern __inline uint16x8_t
21513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21514 __arm_vmaxq (uint16x8_t __a, uint16x8_t __b)
21515 {
21516  return __arm_vmaxq_u16 (__a, __b);
21517 }
21518 
21519 __extension__ extern __inline uint16x8_t
21520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21521 __arm_vhsubq (uint16x8_t __a, uint16x8_t __b)
21522 {
21523  return __arm_vhsubq_u16 (__a, __b);
21524 }
21525 
21526 __extension__ extern __inline uint16x8_t
21527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21528 __arm_vhsubq (uint16x8_t __a, uint16_t __b)
21529 {
21530  return __arm_vhsubq_n_u16 (__a, __b);
21531 }
21532 
21533 __extension__ extern __inline uint16x8_t
21534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21535 __arm_vhaddq (uint16x8_t __a, uint16x8_t __b)
21536 {
21537  return __arm_vhaddq_u16 (__a, __b);
21538 }
21539 
21540 __extension__ extern __inline uint16x8_t
21541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21542 __arm_vhaddq (uint16x8_t __a, uint16_t __b)
21543 {
21544  return __arm_vhaddq_n_u16 (__a, __b);
21545 }
21546 
21547 __extension__ extern __inline uint16x8_t
21548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21549 __arm_veorq (uint16x8_t __a, uint16x8_t __b)
21550 {
21551  return __arm_veorq_u16 (__a, __b);
21552 }
21553 
21554 __extension__ extern __inline mve_pred16_t
21555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21556 __arm_vcmpneq (uint16x8_t __a, uint16_t __b)
21557 {
21558  return __arm_vcmpneq_n_u16 (__a, __b);
21559 }
21560 
21561 __extension__ extern __inline mve_pred16_t
21562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21563 __arm_vcmphiq (uint16x8_t __a, uint16x8_t __b)
21564 {
21565  return __arm_vcmphiq_u16 (__a, __b);
21566 }
21567 
21568 __extension__ extern __inline mve_pred16_t
21569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21570 __arm_vcmphiq (uint16x8_t __a, uint16_t __b)
21571 {
21572  return __arm_vcmphiq_n_u16 (__a, __b);
21573 }
21574 
21575 __extension__ extern __inline mve_pred16_t
21576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21577 __arm_vcmpeqq (uint16x8_t __a, uint16x8_t __b)
21578 {
21579  return __arm_vcmpeqq_u16 (__a, __b);
21580 }
21581 
21582 __extension__ extern __inline mve_pred16_t
21583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21584 __arm_vcmpeqq (uint16x8_t __a, uint16_t __b)
21585 {
21586  return __arm_vcmpeqq_n_u16 (__a, __b);
21587 }
21588 
21589 __extension__ extern __inline mve_pred16_t
21590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21591 __arm_vcmpcsq (uint16x8_t __a, uint16x8_t __b)
21592 {
21593  return __arm_vcmpcsq_u16 (__a, __b);
21594 }
21595 
21596 __extension__ extern __inline mve_pred16_t
21597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21598 __arm_vcmpcsq (uint16x8_t __a, uint16_t __b)
21599 {
21600  return __arm_vcmpcsq_n_u16 (__a, __b);
21601 }
21602 
21603 __extension__ extern __inline uint16x8_t
21604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21605 __arm_vcaddq_rot90 (uint16x8_t __a, uint16x8_t __b)
21606 {
21607  return __arm_vcaddq_rot90_u16 (__a, __b);
21608 }
21609 
21610 __extension__ extern __inline uint16x8_t
21611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21612 __arm_vcaddq_rot270 (uint16x8_t __a, uint16x8_t __b)
21613 {
21614  return __arm_vcaddq_rot270_u16 (__a, __b);
21615 }
21616 
21617 __extension__ extern __inline uint16x8_t
21618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21619 __arm_vbicq (uint16x8_t __a, uint16x8_t __b)
21620 {
21621  return __arm_vbicq_u16 (__a, __b);
21622 }
21623 
21624 __extension__ extern __inline uint16x8_t
21625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21626 __arm_vandq (uint16x8_t __a, uint16x8_t __b)
21627 {
21628  return __arm_vandq_u16 (__a, __b);
21629 }
21630 
21631 __extension__ extern __inline uint32_t
21632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21633 __arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
21634 {
21635  return __arm_vaddvq_p_u16 (__a, __p);
21636 }
21637 
21638 __extension__ extern __inline uint32_t
21639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21640 __arm_vaddvaq (uint32_t __a, uint16x8_t __b)
21641 {
21642  return __arm_vaddvaq_u16 (__a, __b);
21643 }
21644 
21645 __extension__ extern __inline uint16x8_t
21646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21647 __arm_vaddq (uint16x8_t __a, uint16_t __b)
21648 {
21649  return __arm_vaddq_n_u16 (__a, __b);
21650 }
21651 
21652 __extension__ extern __inline uint16x8_t
21653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21654 __arm_vabdq (uint16x8_t __a, uint16x8_t __b)
21655 {
21656  return __arm_vabdq_u16 (__a, __b);
21657 }
21658 
21659 __extension__ extern __inline uint16x8_t
21660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21661 __arm_vshlq_r (uint16x8_t __a, int32_t __b)
21662 {
21663  return __arm_vshlq_r_u16 (__a, __b);
21664 }
21665 
21666 __extension__ extern __inline uint16x8_t
21667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21668 __arm_vrshlq (uint16x8_t __a, int16x8_t __b)
21669 {
21670  return __arm_vrshlq_u16 (__a, __b);
21671 }
21672 
21673 __extension__ extern __inline uint16x8_t
21674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21675 __arm_vrshlq (uint16x8_t __a, int32_t __b)
21676 {
21677  return __arm_vrshlq_n_u16 (__a, __b);
21678 }
21679 
21680 __extension__ extern __inline uint16x8_t
21681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21682 __arm_vqshlq (uint16x8_t __a, int16x8_t __b)
21683 {
21684  return __arm_vqshlq_u16 (__a, __b);
21685 }
21686 
21687 __extension__ extern __inline uint16x8_t
21688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21689 __arm_vqshlq_r (uint16x8_t __a, int32_t __b)
21690 {
21691  return __arm_vqshlq_r_u16 (__a, __b);
21692 }
21693 
21694 __extension__ extern __inline uint16x8_t
21695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21696 __arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
21697 {
21698  return __arm_vqrshlq_u16 (__a, __b);
21699 }
21700 
21701 __extension__ extern __inline uint16x8_t
21702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21703 __arm_vqrshlq (uint16x8_t __a, int32_t __b)
21704 {
21705  return __arm_vqrshlq_n_u16 (__a, __b);
21706 }
21707 
21708 __extension__ extern __inline uint16_t
21709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21710 __arm_vminavq (uint16_t __a, int16x8_t __b)
21711 {
21712  return __arm_vminavq_s16 (__a, __b);
21713 }
21714 
21715 __extension__ extern __inline uint16x8_t
21716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21717 __arm_vminaq (uint16x8_t __a, int16x8_t __b)
21718 {
21719  return __arm_vminaq_s16 (__a, __b);
21720 }
21721 
21722 __extension__ extern __inline uint16_t
21723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21724 __arm_vmaxavq (uint16_t __a, int16x8_t __b)
21725 {
21726  return __arm_vmaxavq_s16 (__a, __b);
21727 }
21728 
21729 __extension__ extern __inline uint16x8_t
21730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21731 __arm_vmaxaq (uint16x8_t __a, int16x8_t __b)
21732 {
21733  return __arm_vmaxaq_s16 (__a, __b);
21734 }
21735 
21736 __extension__ extern __inline uint16x8_t
21737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21738 __arm_vbrsrq (uint16x8_t __a, int32_t __b)
21739 {
21740  return __arm_vbrsrq_n_u16 (__a, __b);
21741 }
21742 
21743 __extension__ extern __inline uint16x8_t
21744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21745 __arm_vshlq_n (uint16x8_t __a, const int __imm)
21746 {
21747  return __arm_vshlq_n_u16 (__a, __imm);
21748 }
21749 
21750 __extension__ extern __inline uint16x8_t
21751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21752 __arm_vrshrq (uint16x8_t __a, const int __imm)
21753 {
21754  return __arm_vrshrq_n_u16 (__a, __imm);
21755 }
21756 
21757 __extension__ extern __inline uint16x8_t
21758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21759 __arm_vqshlq_n (uint16x8_t __a, const int __imm)
21760 {
21761  return __arm_vqshlq_n_u16 (__a, __imm);
21762 }
21763 
21764 __extension__ extern __inline mve_pred16_t
21765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21766 __arm_vcmpneq (int16x8_t __a, int16_t __b)
21767 {
21768  return __arm_vcmpneq_n_s16 (__a, __b);
21769 }
21770 
21771 __extension__ extern __inline mve_pred16_t
21772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21773 __arm_vcmpltq (int16x8_t __a, int16x8_t __b)
21774 {
21775  return __arm_vcmpltq_s16 (__a, __b);
21776 }
21777 
21778 __extension__ extern __inline mve_pred16_t
21779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21780 __arm_vcmpltq (int16x8_t __a, int16_t __b)
21781 {
21782  return __arm_vcmpltq_n_s16 (__a, __b);
21783 }
21784 
21785 __extension__ extern __inline mve_pred16_t
21786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21787 __arm_vcmpleq (int16x8_t __a, int16x8_t __b)
21788 {
21789  return __arm_vcmpleq_s16 (__a, __b);
21790 }
21791 
21792 __extension__ extern __inline mve_pred16_t
21793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21794 __arm_vcmpleq (int16x8_t __a, int16_t __b)
21795 {
21796  return __arm_vcmpleq_n_s16 (__a, __b);
21797 }
21798 
21799 __extension__ extern __inline mve_pred16_t
21800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21801 __arm_vcmpgtq (int16x8_t __a, int16x8_t __b)
21802 {
21803  return __arm_vcmpgtq_s16 (__a, __b);
21804 }
21805 
21806 __extension__ extern __inline mve_pred16_t
21807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21808 __arm_vcmpgtq (int16x8_t __a, int16_t __b)
21809 {
21810  return __arm_vcmpgtq_n_s16 (__a, __b);
21811 }
21812 
21813 __extension__ extern __inline mve_pred16_t
21814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21815 __arm_vcmpgeq (int16x8_t __a, int16x8_t __b)
21816 {
21817  return __arm_vcmpgeq_s16 (__a, __b);
21818 }
21819 
21820 __extension__ extern __inline mve_pred16_t
21821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21822 __arm_vcmpgeq (int16x8_t __a, int16_t __b)
21823 {
21824  return __arm_vcmpgeq_n_s16 (__a, __b);
21825 }
21826 
21827 __extension__ extern __inline mve_pred16_t
21828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21829 __arm_vcmpeqq (int16x8_t __a, int16x8_t __b)
21830 {
21831  return __arm_vcmpeqq_s16 (__a, __b);
21832 }
21833 
21834 __extension__ extern __inline mve_pred16_t
21835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21836 __arm_vcmpeqq (int16x8_t __a, int16_t __b)
21837 {
21838  return __arm_vcmpeqq_n_s16 (__a, __b);
21839 }
21840 
21841 __extension__ extern __inline uint16x8_t
21842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21843 __arm_vqshluq (int16x8_t __a, const int __imm)
21844 {
21845  return __arm_vqshluq_n_s16 (__a, __imm);
21846 }
21847 
21848 __extension__ extern __inline int32_t
21849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21850 __arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p)
21851 {
21852  return __arm_vaddvq_p_s16 (__a, __p);
21853 }
21854 
21855 __extension__ extern __inline int16x8_t
21856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21857 __arm_vsubq (int16x8_t __a, int16x8_t __b)
21858 {
21859  return __arm_vsubq_s16 (__a, __b);
21860 }
21861 
21862 __extension__ extern __inline int16x8_t
21863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21864 __arm_vsubq (int16x8_t __a, int16_t __b)
21865 {
21866  return __arm_vsubq_n_s16 (__a, __b);
21867 }
21868 
21869 __extension__ extern __inline int16x8_t
21870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21871 __arm_vshlq_r (int16x8_t __a, int32_t __b)
21872 {
21873  return __arm_vshlq_r_s16 (__a, __b);
21874 }
21875 
21876 __extension__ extern __inline int16x8_t
21877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21878 __arm_vrshlq (int16x8_t __a, int16x8_t __b)
21879 {
21880  return __arm_vrshlq_s16 (__a, __b);
21881 }
21882 
21883 __extension__ extern __inline int16x8_t
21884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21885 __arm_vrshlq (int16x8_t __a, int32_t __b)
21886 {
21887  return __arm_vrshlq_n_s16 (__a, __b);
21888 }
21889 
21890 __extension__ extern __inline int16x8_t
21891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21892 __arm_vrmulhq (int16x8_t __a, int16x8_t __b)
21893 {
21894  return __arm_vrmulhq_s16 (__a, __b);
21895 }
21896 
21897 __extension__ extern __inline int16x8_t
21898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21899 __arm_vrhaddq (int16x8_t __a, int16x8_t __b)
21900 {
21901  return __arm_vrhaddq_s16 (__a, __b);
21902 }
21903 
21904 __extension__ extern __inline int16x8_t
21905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21906 __arm_vqsubq (int16x8_t __a, int16x8_t __b)
21907 {
21908  return __arm_vqsubq_s16 (__a, __b);
21909 }
21910 
21911 __extension__ extern __inline int16x8_t
21912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21913 __arm_vqsubq (int16x8_t __a, int16_t __b)
21914 {
21915  return __arm_vqsubq_n_s16 (__a, __b);
21916 }
21917 
21918 __extension__ extern __inline int16x8_t
21919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21920 __arm_vqshlq (int16x8_t __a, int16x8_t __b)
21921 {
21922  return __arm_vqshlq_s16 (__a, __b);
21923 }
21924 
21925 __extension__ extern __inline int16x8_t
21926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21927 __arm_vqshlq_r (int16x8_t __a, int32_t __b)
21928 {
21929  return __arm_vqshlq_r_s16 (__a, __b);
21930 }
21931 
21932 __extension__ extern __inline int16x8_t
21933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21934 __arm_vqrshlq (int16x8_t __a, int16x8_t __b)
21935 {
21936  return __arm_vqrshlq_s16 (__a, __b);
21937 }
21938 
21939 __extension__ extern __inline int16x8_t
21940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21941 __arm_vqrshlq (int16x8_t __a, int32_t __b)
21942 {
21943  return __arm_vqrshlq_n_s16 (__a, __b);
21944 }
21945 
21946 __extension__ extern __inline int16x8_t
21947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21948 __arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
21949 {
21950  return __arm_vqrdmulhq_s16 (__a, __b);
21951 }
21952 
21953 __extension__ extern __inline int16x8_t
21954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21955 __arm_vqrdmulhq (int16x8_t __a, int16_t __b)
21956 {
21957  return __arm_vqrdmulhq_n_s16 (__a, __b);
21958 }
21959 
21960 __extension__ extern __inline int16x8_t
21961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21962 __arm_vqdmulhq (int16x8_t __a, int16x8_t __b)
21963 {
21964  return __arm_vqdmulhq_s16 (__a, __b);
21965 }
21966 
21967 __extension__ extern __inline int16x8_t
21968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21969 __arm_vqdmulhq (int16x8_t __a, int16_t __b)
21970 {
21971  return __arm_vqdmulhq_n_s16 (__a, __b);
21972 }
21973 
21974 __extension__ extern __inline int16x8_t
21975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21976 __arm_vqaddq (int16x8_t __a, int16x8_t __b)
21977 {
21978  return __arm_vqaddq_s16 (__a, __b);
21979 }
21980 
21981 __extension__ extern __inline int16x8_t
21982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21983 __arm_vqaddq (int16x8_t __a, int16_t __b)
21984 {
21985  return __arm_vqaddq_n_s16 (__a, __b);
21986 }
21987 
21988 __extension__ extern __inline int16x8_t
21989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21990 __arm_vorrq (int16x8_t __a, int16x8_t __b)
21991 {
21992  return __arm_vorrq_s16 (__a, __b);
21993 }
21994 
21995 __extension__ extern __inline int16x8_t
21996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21997 __arm_vornq (int16x8_t __a, int16x8_t __b)
21998 {
21999  return __arm_vornq_s16 (__a, __b);
22000 }
22001 
22002 __extension__ extern __inline int16x8_t
22003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22004 __arm_vmulq (int16x8_t __a, int16x8_t __b)
22005 {
22006  return __arm_vmulq_s16 (__a, __b);
22007 }
22008 
22009 __extension__ extern __inline int16x8_t
22010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22011 __arm_vmulq (int16x8_t __a, int16_t __b)
22012 {
22013  return __arm_vmulq_n_s16 (__a, __b);
22014 }
22015 
22016 __extension__ extern __inline int32x4_t
22017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22018 __arm_vmulltq_int (int16x8_t __a, int16x8_t __b)
22019 {
22020  return __arm_vmulltq_int_s16 (__a, __b);
22021 }
22022 
22023 __extension__ extern __inline int32x4_t
22024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22025 __arm_vmullbq_int (int16x8_t __a, int16x8_t __b)
22026 {
22027  return __arm_vmullbq_int_s16 (__a, __b);
22028 }
22029 
22030 __extension__ extern __inline int16x8_t
22031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22032 __arm_vmulhq (int16x8_t __a, int16x8_t __b)
22033 {
22034  return __arm_vmulhq_s16 (__a, __b);
22035 }
22036 
22037 __extension__ extern __inline int32_t
22038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22039 __arm_vmlsdavxq (int16x8_t __a, int16x8_t __b)
22040 {
22041  return __arm_vmlsdavxq_s16 (__a, __b);
22042 }
22043 
22044 __extension__ extern __inline int32_t
22045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22046 __arm_vmlsdavq (int16x8_t __a, int16x8_t __b)
22047 {
22048  return __arm_vmlsdavq_s16 (__a, __b);
22049 }
22050 
22051 __extension__ extern __inline int32_t
22052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22053 __arm_vmladavxq (int16x8_t __a, int16x8_t __b)
22054 {
22055  return __arm_vmladavxq_s16 (__a, __b);
22056 }
22057 
22058 __extension__ extern __inline int32_t
22059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22060 __arm_vmladavq (int16x8_t __a, int16x8_t __b)
22061 {
22062  return __arm_vmladavq_s16 (__a, __b);
22063 }
22064 
22065 __extension__ extern __inline int16_t
22066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22067 __arm_vminvq (int16_t __a, int16x8_t __b)
22068 {
22069  return __arm_vminvq_s16 (__a, __b);
22070 }
22071 
22072 __extension__ extern __inline int16x8_t
22073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22074 __arm_vminq (int16x8_t __a, int16x8_t __b)
22075 {
22076  return __arm_vminq_s16 (__a, __b);
22077 }
22078 
22079 __extension__ extern __inline int16_t
22080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22081 __arm_vmaxvq (int16_t __a, int16x8_t __b)
22082 {
22083  return __arm_vmaxvq_s16 (__a, __b);
22084 }
22085 
22086 __extension__ extern __inline int16x8_t
22087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22088 __arm_vmaxq (int16x8_t __a, int16x8_t __b)
22089 {
22090  return __arm_vmaxq_s16 (__a, __b);
22091 }
22092 
22093 __extension__ extern __inline int16x8_t
22094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22095 __arm_vhsubq (int16x8_t __a, int16x8_t __b)
22096 {
22097  return __arm_vhsubq_s16 (__a, __b);
22098 }
22099 
22100 __extension__ extern __inline int16x8_t
22101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22102 __arm_vhsubq (int16x8_t __a, int16_t __b)
22103 {
22104  return __arm_vhsubq_n_s16 (__a, __b);
22105 }
22106 
22107 __extension__ extern __inline int16x8_t
22108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22109 __arm_vhcaddq_rot90 (int16x8_t __a, int16x8_t __b)
22110 {
22111  return __arm_vhcaddq_rot90_s16 (__a, __b);
22112 }
22113 
22114 __extension__ extern __inline int16x8_t
22115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22116 __arm_vhcaddq_rot270 (int16x8_t __a, int16x8_t __b)
22117 {
22118  return __arm_vhcaddq_rot270_s16 (__a, __b);
22119 }
22120 
22121 __extension__ extern __inline int16x8_t
22122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22123 __arm_vhaddq (int16x8_t __a, int16x8_t __b)
22124 {
22125  return __arm_vhaddq_s16 (__a, __b);
22126 }
22127 
22128 __extension__ extern __inline int16x8_t
22129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22130 __arm_vhaddq (int16x8_t __a, int16_t __b)
22131 {
22132  return __arm_vhaddq_n_s16 (__a, __b);
22133 }
22134 
22135 __extension__ extern __inline int16x8_t
22136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22137 __arm_veorq (int16x8_t __a, int16x8_t __b)
22138 {
22139  return __arm_veorq_s16 (__a, __b);
22140 }
22141 
22142 __extension__ extern __inline int16x8_t
22143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22144 __arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b)
22145 {
22146  return __arm_vcaddq_rot90_s16 (__a, __b);
22147 }
22148 
22149 __extension__ extern __inline int16x8_t
22150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22151 __arm_vcaddq_rot270 (int16x8_t __a, int16x8_t __b)
22152 {
22153  return __arm_vcaddq_rot270_s16 (__a, __b);
22154 }
22155 
22156 __extension__ extern __inline int16x8_t
22157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22158 __arm_vbrsrq (int16x8_t __a, int32_t __b)
22159 {
22160  return __arm_vbrsrq_n_s16 (__a, __b);
22161 }
22162 
22163 __extension__ extern __inline int16x8_t
22164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22165 __arm_vbicq (int16x8_t __a, int16x8_t __b)
22166 {
22167  return __arm_vbicq_s16 (__a, __b);
22168 }
22169 
22170 __extension__ extern __inline int16x8_t
22171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22172 __arm_vandq (int16x8_t __a, int16x8_t __b)
22173 {
22174  return __arm_vandq_s16 (__a, __b);
22175 }
22176 
22177 __extension__ extern __inline int32_t
22178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22179 __arm_vaddvaq (int32_t __a, int16x8_t __b)
22180 {
22181  return __arm_vaddvaq_s16 (__a, __b);
22182 }
22183 
22184 __extension__ extern __inline int16x8_t
22185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22186 __arm_vaddq (int16x8_t __a, int16_t __b)
22187 {
22188  return __arm_vaddq_n_s16 (__a, __b);
22189 }
22190 
22191 __extension__ extern __inline int16x8_t
22192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22193 __arm_vabdq (int16x8_t __a, int16x8_t __b)
22194 {
22195  return __arm_vabdq_s16 (__a, __b);
22196 }
22197 
22198 __extension__ extern __inline int16x8_t
22199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22200 __arm_vshlq_n (int16x8_t __a, const int __imm)
22201 {
22202  return __arm_vshlq_n_s16 (__a, __imm);
22203 }
22204 
22205 __extension__ extern __inline int16x8_t
22206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22207 __arm_vrshrq (int16x8_t __a, const int __imm)
22208 {
22209  return __arm_vrshrq_n_s16 (__a, __imm);
22210 }
22211 
22212 __extension__ extern __inline int16x8_t
22213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22214 __arm_vqshlq_n (int16x8_t __a, const int __imm)
22215 {
22216  return __arm_vqshlq_n_s16 (__a, __imm);
22217 }
22218 
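/* The equivalent uint32x4_t overloads start here, dispatching to the _u32
   intrinsic forms.  */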
22219 __extension__ extern __inline uint32x4_t
22220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22221 __arm_vsubq (uint32x4_t __a, uint32x4_t __b)
22222 {
22223  return __arm_vsubq_u32 (__a, __b);
22224 }
22225 
22226 __extension__ extern __inline uint32x4_t
22227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22228 __arm_vsubq (uint32x4_t __a, uint32_t __b)
22229 {
22230  return __arm_vsubq_n_u32 (__a, __b);
22231 }
22232 
22233 __extension__ extern __inline uint32x4_t
22234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22235 __arm_vrmulhq (uint32x4_t __a, uint32x4_t __b)
22236 {
22237  return __arm_vrmulhq_u32 (__a, __b);
22238 }
22239 
22240 __extension__ extern __inline uint32x4_t
22241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22242 __arm_vrhaddq (uint32x4_t __a, uint32x4_t __b)
22243 {
22244  return __arm_vrhaddq_u32 (__a, __b);
22245 }
22246 
22247 __extension__ extern __inline uint32x4_t
22248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22249 __arm_vqsubq (uint32x4_t __a, uint32x4_t __b)
22250 {
22251  return __arm_vqsubq_u32 (__a, __b);
22252 }
22253 
22254 __extension__ extern __inline uint32x4_t
22255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22256 __arm_vqsubq (uint32x4_t __a, uint32_t __b)
22257 {
22258  return __arm_vqsubq_n_u32 (__a, __b);
22259 }
22260 
22261 __extension__ extern __inline uint32x4_t
22262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22263 __arm_vqaddq (uint32x4_t __a, uint32x4_t __b)
22264 {
22265  return __arm_vqaddq_u32 (__a, __b);
22266 }
22267 
22268 __extension__ extern __inline uint32x4_t
22269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22270 __arm_vqaddq (uint32x4_t __a, uint32_t __b)
22271 {
22272  return __arm_vqaddq_n_u32 (__a, __b);
22273 }
22274 
22275 __extension__ extern __inline uint32x4_t
22276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22277 __arm_vorrq (uint32x4_t __a, uint32x4_t __b)
22278 {
22279  return __arm_vorrq_u32 (__a, __b);
22280 }
22281 
22282 __extension__ extern __inline uint32x4_t
22283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22284 __arm_vornq (uint32x4_t __a, uint32x4_t __b)
22285 {
22286  return __arm_vornq_u32 (__a, __b);
22287 }
22288 
22289 __extension__ extern __inline uint32x4_t
22290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22291 __arm_vmulq (uint32x4_t __a, uint32x4_t __b)
22292 {
22293  return __arm_vmulq_u32 (__a, __b);
22294 }
22295 
22296 __extension__ extern __inline uint32x4_t
22297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22298 __arm_vmulq (uint32x4_t __a, uint32_t __b)
22299 {
22300  return __arm_vmulq_n_u32 (__a, __b);
22301 }
22302 
22303 __extension__ extern __inline uint64x2_t
22304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22305 __arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b)
22306 {
22307  return __arm_vmulltq_int_u32 (__a, __b);
22308 }
22309 
22310 __extension__ extern __inline uint64x2_t
22311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22312 __arm_vmullbq_int (uint32x4_t __a, uint32x4_t __b)
22313 {
22314  return __arm_vmullbq_int_u32 (__a, __b);
22315 }
22316 
22317 __extension__ extern __inline uint32x4_t
22318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22319 __arm_vmulhq (uint32x4_t __a, uint32x4_t __b)
22320 {
22321  return __arm_vmulhq_u32 (__a, __b);
22322 }
22323 
22324 __extension__ extern __inline uint32_t
22325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22326 __arm_vmladavq (uint32x4_t __a, uint32x4_t __b)
22327 {
22328  return __arm_vmladavq_u32 (__a, __b);
22329 }
22330 
22331 __extension__ extern __inline uint32_t
22332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22333 __arm_vminvq (uint32_t __a, uint32x4_t __b)
22334 {
22335  return __arm_vminvq_u32 (__a, __b);
22336 }
22337 
22338 __extension__ extern __inline uint32x4_t
22339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22340 __arm_vminq (uint32x4_t __a, uint32x4_t __b)
22341 {
22342  return __arm_vminq_u32 (__a, __b);
22343 }
22344 
22345 __extension__ extern __inline uint32_t
22346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22347 __arm_vmaxvq (uint32_t __a, uint32x4_t __b)
22348 {
22349  return __arm_vmaxvq_u32 (__a, __b);
22350 }
22351 
22352 __extension__ extern __inline uint32x4_t
22353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22354 __arm_vmaxq (uint32x4_t __a, uint32x4_t __b)
22355 {
22356  return __arm_vmaxq_u32 (__a, __b);
22357 }
22358 
22359 __extension__ extern __inline uint32x4_t
22360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22361 __arm_vhsubq (uint32x4_t __a, uint32x4_t __b)
22362 {
22363  return __arm_vhsubq_u32 (__a, __b);
22364 }
22365 
22366 __extension__ extern __inline uint32x4_t
22367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22368 __arm_vhsubq (uint32x4_t __a, uint32_t __b)
22369 {
22370  return __arm_vhsubq_n_u32 (__a, __b);
22371 }
22372 
22373 __extension__ extern __inline uint32x4_t
22374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22375 __arm_vhaddq (uint32x4_t __a, uint32x4_t __b)
22376 {
22377  return __arm_vhaddq_u32 (__a, __b);
22378 }
22379 
22380 __extension__ extern __inline uint32x4_t
22381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22382 __arm_vhaddq (uint32x4_t __a, uint32_t __b)
22383 {
22384  return __arm_vhaddq_n_u32 (__a, __b);
22385 }
22386 
22387 __extension__ extern __inline uint32x4_t
22388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22389 __arm_veorq (uint32x4_t __a, uint32x4_t __b)
22390 {
22391  return __arm_veorq_u32 (__a, __b);
22392 }
22393 
22394 __extension__ extern __inline mve_pred16_t
22395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22396 __arm_vcmpneq (uint32x4_t __a, uint32_t __b)
22397 {
22398  return __arm_vcmpneq_n_u32 (__a, __b);
22399 }
22400 
22401 __extension__ extern __inline mve_pred16_t
22402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22403 __arm_vcmphiq (uint32x4_t __a, uint32x4_t __b)
22404 {
22405  return __arm_vcmphiq_u32 (__a, __b);
22406 }
22407 
22408 __extension__ extern __inline mve_pred16_t
22409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22410 __arm_vcmphiq (uint32x4_t __a, uint32_t __b)
22411 {
22412  return __arm_vcmphiq_n_u32 (__a, __b);
22413 }
22414 
22415 __extension__ extern __inline mve_pred16_t
22416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22417 __arm_vcmpeqq (uint32x4_t __a, uint32x4_t __b)
22418 {
22419  return __arm_vcmpeqq_u32 (__a, __b);
22420 }
22421 
22422 __extension__ extern __inline mve_pred16_t
22423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22424 __arm_vcmpeqq (uint32x4_t __a, uint32_t __b)
22425 {
22426  return __arm_vcmpeqq_n_u32 (__a, __b);
22427 }
22428 
22429 __extension__ extern __inline mve_pred16_t
22430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22431 __arm_vcmpcsq (uint32x4_t __a, uint32x4_t __b)
22432 {
22433  return __arm_vcmpcsq_u32 (__a, __b);
22434 }
22435 
22436 __extension__ extern __inline mve_pred16_t
22437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22438 __arm_vcmpcsq (uint32x4_t __a, uint32_t __b)
22439 {
22440  return __arm_vcmpcsq_n_u32 (__a, __b);
22441 }
22442 
22443 __extension__ extern __inline uint32x4_t
22444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22445 __arm_vcaddq_rot90 (uint32x4_t __a, uint32x4_t __b)
22446 {
22447  return __arm_vcaddq_rot90_u32 (__a, __b);
22448 }
22449 
22450 __extension__ extern __inline uint32x4_t
22451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22452 __arm_vcaddq_rot270 (uint32x4_t __a, uint32x4_t __b)
22453 {
22454  return __arm_vcaddq_rot270_u32 (__a, __b);
22455 }
22456 
22457 __extension__ extern __inline uint32x4_t
22458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22459 __arm_vbicq (uint32x4_t __a, uint32x4_t __b)
22460 {
22461  return __arm_vbicq_u32 (__a, __b);
22462 }
22463 
22464 __extension__ extern __inline uint32x4_t
22465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22466 __arm_vandq (uint32x4_t __a, uint32x4_t __b)
22467 {
22468  return __arm_vandq_u32 (__a, __b);
22469 }
22470 
22471 __extension__ extern __inline uint32_t
22472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22473 __arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
22474 {
22475  return __arm_vaddvq_p_u32 (__a, __p);
22476 }
22477 
22478 __extension__ extern __inline uint32_t
22479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22480 __arm_vaddvaq (uint32_t __a, uint32x4_t __b)
22481 {
22482  return __arm_vaddvaq_u32 (__a, __b);
22483 }
22484 
22485 __extension__ extern __inline uint32x4_t
22486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22487 __arm_vaddq (uint32x4_t __a, uint32_t __b)
22488 {
22489  return __arm_vaddq_n_u32 (__a, __b);
22490 }
22491 
22492 __extension__ extern __inline uint32x4_t
22493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22494 __arm_vabdq (uint32x4_t __a, uint32x4_t __b)
22495 {
22496  return __arm_vabdq_u32 (__a, __b);
22497 }
22498 
22499 __extension__ extern __inline uint32x4_t
22500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22501 __arm_vshlq_r (uint32x4_t __a, int32_t __b)
22502 {
22503  return __arm_vshlq_r_u32 (__a, __b);
22504 }
22505 
22506 __extension__ extern __inline uint32x4_t
22507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22508 __arm_vrshlq (uint32x4_t __a, int32x4_t __b)
22509 {
22510  return __arm_vrshlq_u32 (__a, __b);
22511 }
22512 
22513 __extension__ extern __inline uint32x4_t
22514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22515 __arm_vrshlq (uint32x4_t __a, int32_t __b)
22516 {
22517  return __arm_vrshlq_n_u32 (__a, __b);
22518 }
22519 
22520 __extension__ extern __inline uint32x4_t
22521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22522 __arm_vqshlq (uint32x4_t __a, int32x4_t __b)
22523 {
22524  return __arm_vqshlq_u32 (__a, __b);
22525 }
22526 
22527 __extension__ extern __inline uint32x4_t
22528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22529 __arm_vqshlq_r (uint32x4_t __a, int32_t __b)
22530 {
22531  return __arm_vqshlq_r_u32 (__a, __b);
22532 }
22533 
22534 __extension__ extern __inline uint32x4_t
22535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22536 __arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
22537 {
22538  return __arm_vqrshlq_u32 (__a, __b);
22539 }
22540 
22541 __extension__ extern __inline uint32x4_t
22542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22543 __arm_vqrshlq (uint32x4_t __a, int32_t __b)
22544 {
22545  return __arm_vqrshlq_n_u32 (__a, __b);
22546 }
22547 
22548 __extension__ extern __inline uint32_t
22549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22550 __arm_vminavq (uint32_t __a, int32x4_t __b)
22551 {
22552  return __arm_vminavq_s32 (__a, __b);
22553 }
22554 
22555 __extension__ extern __inline uint32x4_t
22556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22557 __arm_vminaq (uint32x4_t __a, int32x4_t __b)
22558 {
22559  return __arm_vminaq_s32 (__a, __b);
22560 }
22561 
22562 __extension__ extern __inline uint32_t
22563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22564 __arm_vmaxavq (uint32_t __a, int32x4_t __b)
22565 {
22566  return __arm_vmaxavq_s32 (__a, __b);
22567 }
22568 
22569 __extension__ extern __inline uint32x4_t
22570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22571 __arm_vmaxaq (uint32x4_t __a, int32x4_t __b)
22572 {
22573  return __arm_vmaxaq_s32 (__a, __b);
22574 }
22575 
22576 __extension__ extern __inline uint32x4_t
22577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22578 __arm_vbrsrq (uint32x4_t __a, int32_t __b)
22579 {
22580  return __arm_vbrsrq_n_u32 (__a, __b);
22581 }
22582 
22583 __extension__ extern __inline uint32x4_t
22584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22585 __arm_vshlq_n (uint32x4_t __a, const int __imm)
22586 {
22587  return __arm_vshlq_n_u32 (__a, __imm);
22588 }
22589 
22590 __extension__ extern __inline uint32x4_t
22591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22592 __arm_vrshrq (uint32x4_t __a, const int __imm)
22593 {
22594  return __arm_vrshrq_n_u32 (__a, __imm);
22595 }
22596 
22597 __extension__ extern __inline uint32x4_t
22598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22599 __arm_vqshlq_n (uint32x4_t __a, const int __imm)
22600 {
22601  return __arm_vqshlq_n_u32 (__a, __imm);
22602 }
22603 
22604 __extension__ extern __inline mve_pred16_t
22605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22606 __arm_vcmpneq (int32x4_t __a, int32_t __b)
22607 {
22608  return __arm_vcmpneq_n_s32 (__a, __b);
22609 }
22610 
22611 __extension__ extern __inline mve_pred16_t
22612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22613 __arm_vcmpltq (int32x4_t __a, int32x4_t __b)
22614 {
22615  return __arm_vcmpltq_s32 (__a, __b);
22616 }
22617 
22618 __extension__ extern __inline mve_pred16_t
22619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22620 __arm_vcmpltq (int32x4_t __a, int32_t __b)
22621 {
22622  return __arm_vcmpltq_n_s32 (__a, __b);
22623 }
22624 
22625 __extension__ extern __inline mve_pred16_t
22626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22627 __arm_vcmpleq (int32x4_t __a, int32x4_t __b)
22628 {
22629  return __arm_vcmpleq_s32 (__a, __b);
22630 }
22631 
22632 __extension__ extern __inline mve_pred16_t
22633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22634 __arm_vcmpleq (int32x4_t __a, int32_t __b)
22635 {
22636  return __arm_vcmpleq_n_s32 (__a, __b);
22637 }
22638 
22639 __extension__ extern __inline mve_pred16_t
22640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22641 __arm_vcmpgtq (int32x4_t __a, int32x4_t __b)
22642 {
22643  return __arm_vcmpgtq_s32 (__a, __b);
22644 }
22645 
22646 __extension__ extern __inline mve_pred16_t
22647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22648 __arm_vcmpgtq (int32x4_t __a, int32_t __b)
22649 {
22650  return __arm_vcmpgtq_n_s32 (__a, __b);
22651 }
22652 
22653 __extension__ extern __inline mve_pred16_t
22654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22655 __arm_vcmpgeq (int32x4_t __a, int32x4_t __b)
22656 {
22657  return __arm_vcmpgeq_s32 (__a, __b);
22658 }
22659 
22660 __extension__ extern __inline mve_pred16_t
22661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22662 __arm_vcmpgeq (int32x4_t __a, int32_t __b)
22663 {
22664  return __arm_vcmpgeq_n_s32 (__a, __b);
22665 }
22666 
22667 __extension__ extern __inline mve_pred16_t
22668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22669 __arm_vcmpeqq (int32x4_t __a, int32x4_t __b)
22670 {
22671  return __arm_vcmpeqq_s32 (__a, __b);
22672 }
22673 
22674 __extension__ extern __inline mve_pred16_t
22675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22676 __arm_vcmpeqq (int32x4_t __a, int32_t __b)
22677 {
22678  return __arm_vcmpeqq_n_s32 (__a, __b);
22679 }
22680 
22681 __extension__ extern __inline uint32x4_t
22682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22683 __arm_vqshluq (int32x4_t __a, const int __imm)
22684 {
22685  return __arm_vqshluq_n_s32 (__a, __imm);
22686 }
22687 
22688 __extension__ extern __inline int32_t
22689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22690 __arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p)
22691 {
22692  return __arm_vaddvq_p_s32 (__a, __p);
22693 }
22694 
22695 __extension__ extern __inline int32x4_t
22696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22697 __arm_vsubq (int32x4_t __a, int32x4_t __b)
22698 {
22699  return __arm_vsubq_s32 (__a, __b);
22700 }
22701 
22702 __extension__ extern __inline int32x4_t
22703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22704 __arm_vsubq (int32x4_t __a, int32_t __b)
22705 {
22706  return __arm_vsubq_n_s32 (__a, __b);
22707 }
22708 
22709 __extension__ extern __inline int32x4_t
22710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22711 __arm_vshlq_r (int32x4_t __a, int32_t __b)
22712 {
22713  return __arm_vshlq_r_s32 (__a, __b);
22714 }
22715 
22716 __extension__ extern __inline int32x4_t
22717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22718 __arm_vrshlq (int32x4_t __a, int32x4_t __b)
22719 {
22720  return __arm_vrshlq_s32 (__a, __b);
22721 }
22722 
22723 __extension__ extern __inline int32x4_t
22724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22725 __arm_vrshlq (int32x4_t __a, int32_t __b)
22726 {
22727  return __arm_vrshlq_n_s32 (__a, __b);
22728 }
22729 
22730 __extension__ extern __inline int32x4_t
22731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22732 __arm_vrmulhq (int32x4_t __a, int32x4_t __b)
22733 {
22734  return __arm_vrmulhq_s32 (__a, __b);
22735 }
22736 
22737 __extension__ extern __inline int32x4_t
22738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22739 __arm_vrhaddq (int32x4_t __a, int32x4_t __b)
22740 {
22741  return __arm_vrhaddq_s32 (__a, __b);
22742 }
22743 
22744 __extension__ extern __inline int32x4_t
22745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22746 __arm_vqsubq (int32x4_t __a, int32x4_t __b)
22747 {
22748  return __arm_vqsubq_s32 (__a, __b);
22749 }
22750 
22751 __extension__ extern __inline int32x4_t
22752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22753 __arm_vqsubq (int32x4_t __a, int32_t __b)
22754 {
22755  return __arm_vqsubq_n_s32 (__a, __b);
22756 }
22757 
22758 __extension__ extern __inline int32x4_t
22759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22760 __arm_vqshlq (int32x4_t __a, int32x4_t __b)
22761 {
22762  return __arm_vqshlq_s32 (__a, __b);
22763 }
22764 
22765 __extension__ extern __inline int32x4_t
22766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22767 __arm_vqshlq_r (int32x4_t __a, int32_t __b)
22768 {
22769  return __arm_vqshlq_r_s32 (__a, __b);
22770 }
22771 
22772 __extension__ extern __inline int32x4_t
22773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22774 __arm_vqrshlq (int32x4_t __a, int32x4_t __b)
22775 {
22776  return __arm_vqrshlq_s32 (__a, __b);
22777 }
22778 
22779 __extension__ extern __inline int32x4_t
22780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22781 __arm_vqrshlq (int32x4_t __a, int32_t __b)
22782 {
22783  return __arm_vqrshlq_n_s32 (__a, __b);
22784 }
22785 
22786 __extension__ extern __inline int32x4_t
22787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22788 __arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
22789 {
22790  return __arm_vqrdmulhq_s32 (__a, __b);
22791 }
22792 
22793 __extension__ extern __inline int32x4_t
22794 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22795 __arm_vqrdmulhq (int32x4_t __a, int32_t __b)
22796 {
22797  return __arm_vqrdmulhq_n_s32 (__a, __b);
22798 }
22799 
22800 __extension__ extern __inline int32x4_t
22801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22802 __arm_vqdmulhq (int32x4_t __a, int32x4_t __b)
22803 {
22804  return __arm_vqdmulhq_s32 (__a, __b);
22805 }
22806 
22807 __extension__ extern __inline int32x4_t
22808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22809 __arm_vqdmulhq (int32x4_t __a, int32_t __b)
22810 {
22811  return __arm_vqdmulhq_n_s32 (__a, __b);
22812 }
22813 
22814 __extension__ extern __inline int32x4_t
22815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22816 __arm_vqaddq (int32x4_t __a, int32x4_t __b)
22817 {
22818  return __arm_vqaddq_s32 (__a, __b);
22819 }
22820 
22821 __extension__ extern __inline int32x4_t
22822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22823 __arm_vqaddq (int32x4_t __a, int32_t __b)
22824 {
22825  return __arm_vqaddq_n_s32 (__a, __b);
22826 }
22827 
22828 __extension__ extern __inline int32x4_t
22829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22830 __arm_vorrq (int32x4_t __a, int32x4_t __b)
22831 {
22832  return __arm_vorrq_s32 (__a, __b);
22833 }
22834 
22835 __extension__ extern __inline int32x4_t
22836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22837 __arm_vornq (int32x4_t __a, int32x4_t __b)
22838 {
22839  return __arm_vornq_s32 (__a, __b);
22840 }
22841 
22842 __extension__ extern __inline int32x4_t
22843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22844 __arm_vmulq (int32x4_t __a, int32x4_t __b)
22845 {
22846  return __arm_vmulq_s32 (__a, __b);
22847 }
22848 
22849 __extension__ extern __inline int32x4_t
22850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22851 __arm_vmulq (int32x4_t __a, int32_t __b)
22852 {
22853  return __arm_vmulq_n_s32 (__a, __b);
22854 }
22855 
22856 __extension__ extern __inline int64x2_t
22857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22858 __arm_vmulltq_int (int32x4_t __a, int32x4_t __b)
22859 {
22860  return __arm_vmulltq_int_s32 (__a, __b);
22861 }
22862 
22863 __extension__ extern __inline int64x2_t
22864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22865 __arm_vmullbq_int (int32x4_t __a, int32x4_t __b)
22866 {
22867  return __arm_vmullbq_int_s32 (__a, __b);
22868 }
22869 
22870 __extension__ extern __inline int32x4_t
22871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22872 __arm_vmulhq (int32x4_t __a, int32x4_t __b)
22873 {
22874  return __arm_vmulhq_s32 (__a, __b);
22875 }
22876 
22877 __extension__ extern __inline int32_t
22878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22879 __arm_vmlsdavxq (int32x4_t __a, int32x4_t __b)
22880 {
22881  return __arm_vmlsdavxq_s32 (__a, __b);
22882 }
22883 
22884 __extension__ extern __inline int32_t
22885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22886 __arm_vmlsdavq (int32x4_t __a, int32x4_t __b)
22887 {
22888  return __arm_vmlsdavq_s32 (__a, __b);
22889 }
22890 
22891 __extension__ extern __inline int32_t
22892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22893 __arm_vmladavxq (int32x4_t __a, int32x4_t __b)
22894 {
22895  return __arm_vmladavxq_s32 (__a, __b);
22896 }
22897 
22898 __extension__ extern __inline int32_t
22899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22900 __arm_vmladavq (int32x4_t __a, int32x4_t __b)
22901 {
22902  return __arm_vmladavq_s32 (__a, __b);
22903 }
22904 
22905 __extension__ extern __inline int32_t
22906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22907 __arm_vminvq (int32_t __a, int32x4_t __b)
22908 {
22909  return __arm_vminvq_s32 (__a, __b);
22910 }
22911 
22912 __extension__ extern __inline int32x4_t
22913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22914 __arm_vminq (int32x4_t __a, int32x4_t __b)
22915 {
22916  return __arm_vminq_s32 (__a, __b);
22917 }
22918 
22919 __extension__ extern __inline int32_t
22920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22921 __arm_vmaxvq (int32_t __a, int32x4_t __b)
22922 {
22923  return __arm_vmaxvq_s32 (__a, __b);
22924 }
22925 
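/* Illustrative sketch added for exposition (not part of the upstream header):
   the vminvq/vmaxvq wrappers above are across-vector reductions that fold the
   vector lanes into the scalar passed as the first argument, e.g.

     #include <limits.h>

     int32_t example_max_lane (int32x4_t v)
     {
       // Seeding with INT32_MIN makes the result the largest lane of v.
       return __arm_vmaxvq (INT32_MIN, v);   // maps to __arm_vmaxvq_s32
     }

   example_max_lane is a hypothetical name used only for this sketch.  */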
22926 __extension__ extern __inline int32x4_t
22927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22928 __arm_vmaxq (int32x4_t __a, int32x4_t __b)
22929 {
22930  return __arm_vmaxq_s32 (__a, __b);
22931 }
22932 
22933 __extension__ extern __inline int32x4_t
22934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22935 __arm_vhsubq (int32x4_t __a, int32x4_t __b)
22936 {
22937  return __arm_vhsubq_s32 (__a, __b);
22938 }
22939 
22940 __extension__ extern __inline int32x4_t
22941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22942 __arm_vhsubq (int32x4_t __a, int32_t __b)
22943 {
22944  return __arm_vhsubq_n_s32 (__a, __b);
22945 }
22946 
22947 __extension__ extern __inline int32x4_t
22948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22949 __arm_vhcaddq_rot90 (int32x4_t __a, int32x4_t __b)
22950 {
22951  return __arm_vhcaddq_rot90_s32 (__a, __b);
22952 }
22953 
22954 __extension__ extern __inline int32x4_t
22955 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22956 __arm_vhcaddq_rot270 (int32x4_t __a, int32x4_t __b)
22957 {
22958  return __arm_vhcaddq_rot270_s32 (__a, __b);
22959 }
22960 
22961 __extension__ extern __inline int32x4_t
22962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22963 __arm_vhaddq (int32x4_t __a, int32x4_t __b)
22964 {
22965  return __arm_vhaddq_s32 (__a, __b);
22966 }
22967 
22968 __extension__ extern __inline int32x4_t
22969 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22970 __arm_vhaddq (int32x4_t __a, int32_t __b)
22971 {
22972  return __arm_vhaddq_n_s32 (__a, __b);
22973 }
22974 
22975 __extension__ extern __inline int32x4_t
22976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22977 __arm_veorq (int32x4_t __a, int32x4_t __b)
22978 {
22979  return __arm_veorq_s32 (__a, __b);
22980 }
22981 
22982 __extension__ extern __inline int32x4_t
22983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22984 __arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b)
22985 {
22986  return __arm_vcaddq_rot90_s32 (__a, __b);
22987 }
22988 
22989 __extension__ extern __inline int32x4_t
22990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22991 __arm_vcaddq_rot270 (int32x4_t __a, int32x4_t __b)
22992 {
22993  return __arm_vcaddq_rot270_s32 (__a, __b);
22994 }
22995 
22996 __extension__ extern __inline int32x4_t
22997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22998 __arm_vbrsrq (int32x4_t __a, int32_t __b)
22999 {
23000  return __arm_vbrsrq_n_s32 (__a, __b);
23001 }
23002 
23003 __extension__ extern __inline int32x4_t
23004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23005 __arm_vbicq (int32x4_t __a, int32x4_t __b)
23006 {
23007  return __arm_vbicq_s32 (__a, __b);
23008 }
23009 
23010 __extension__ extern __inline int32x4_t
23011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23012 __arm_vandq (int32x4_t __a, int32x4_t __b)
23013 {
23014  return __arm_vandq_s32 (__a, __b);
23015 }
23016 
23017 __extension__ extern __inline int32_t
23018 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23019 __arm_vaddvaq (int32_t __a, int32x4_t __b)
23020 {
23021  return __arm_vaddvaq_s32 (__a, __b);
23022 }
23023 
23024 __extension__ extern __inline int32x4_t
23025 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23026 __arm_vaddq (int32x4_t __a, int32_t __b)
23027 {
23028  return __arm_vaddq_n_s32 (__a, __b);
23029 }
23030 
23031 __extension__ extern __inline int32x4_t
23032 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23033 __arm_vabdq (int32x4_t __a, int32x4_t __b)
23034 {
23035  return __arm_vabdq_s32 (__a, __b);
23036 }
23037 
23038 __extension__ extern __inline int32x4_t
23039 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23040 __arm_vshlq_n (int32x4_t __a, const int __imm)
23041 {
23042  return __arm_vshlq_n_s32 (__a, __imm);
23043 }
23044 
23045 __extension__ extern __inline int32x4_t
23046 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23047 __arm_vrshrq (int32x4_t __a, const int __imm)
23048 {
23049  return __arm_vrshrq_n_s32 (__a, __imm);
23050 }
23051 
23052 __extension__ extern __inline int32x4_t
23053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23054 __arm_vqshlq_n (int32x4_t __a, const int __imm)
23055 {
23056  return __arm_vqshlq_n_s32 (__a, __imm);
23057 }
23058 
23059 __extension__ extern __inline uint8x16_t
23060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23061 __arm_vqmovntq (uint8x16_t __a, uint16x8_t __b)
23062 {
23063  return __arm_vqmovntq_u16 (__a, __b);
23064 }
23065 
23066 __extension__ extern __inline uint8x16_t
23067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23068 __arm_vqmovnbq (uint8x16_t __a, uint16x8_t __b)
23069 {
23070  return __arm_vqmovnbq_u16 (__a, __b);
23071 }
23072 
23073 __extension__ extern __inline uint16x8_t
23074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23075 __arm_vmulltq_poly (uint8x16_t __a, uint8x16_t __b)
23076 {
23077  return __arm_vmulltq_poly_p8 (__a, __b);
23078 }
23079 
23080 __extension__ extern __inline uint16x8_t
23081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23082 __arm_vmullbq_poly (uint8x16_t __a, uint8x16_t __b)
23083 {
23084  return __arm_vmullbq_poly_p8 (__a, __b);
23085 }
23086 
23087 __extension__ extern __inline uint8x16_t
23088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23089 __arm_vmovntq (uint8x16_t __a, uint16x8_t __b)
23090 {
23091  return __arm_vmovntq_u16 (__a, __b);
23092 }
23093 
23094 __extension__ extern __inline uint8x16_t
23095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23096 __arm_vmovnbq (uint8x16_t __a, uint16x8_t __b)
23097 {
23098  return __arm_vmovnbq_u16 (__a, __b);
23099 }
23100 
23101 __extension__ extern __inline uint64_t
23102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23103 __arm_vmlaldavq (uint16x8_t __a, uint16x8_t __b)
23104 {
23105  return __arm_vmlaldavq_u16 (__a, __b);
23106 }
23107 
23108 __extension__ extern __inline uint8x16_t
23109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23110 __arm_vqmovuntq (uint8x16_t __a, int16x8_t __b)
23111 {
23112  return __arm_vqmovuntq_s16 (__a, __b);
23113 }
23114 
23115 __extension__ extern __inline uint8x16_t
23116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23117 __arm_vqmovunbq (uint8x16_t __a, int16x8_t __b)
23118 {
23119  return __arm_vqmovunbq_s16 (__a, __b);
23120 }
23121 
23122 __extension__ extern __inline uint16x8_t
23123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23124 __arm_vshlltq (uint8x16_t __a, const int __imm)
23125 {
23126  return __arm_vshlltq_n_u8 (__a, __imm);
23127 }
23128 
23129 __extension__ extern __inline uint16x8_t
23130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23131 __arm_vshllbq (uint8x16_t __a, const int __imm)
23132 {
23133  return __arm_vshllbq_n_u8 (__a, __imm);
23134 }
23135 
23136 __extension__ extern __inline uint16x8_t
23137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23138 __arm_vorrq (uint16x8_t __a, const int __imm)
23139 {
23140  return __arm_vorrq_n_u16 (__a, __imm);
23141 }
23142 
23143 __extension__ extern __inline uint16x8_t
23144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23145 __arm_vbicq (uint16x8_t __a, const int __imm)
23146 {
23147  return __arm_vbicq_n_u16 (__a, __imm);
23148 }
23149 
23150 __extension__ extern __inline int8x16_t
23151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23152 __arm_vqmovntq (int8x16_t __a, int16x8_t __b)
23153 {
23154  return __arm_vqmovntq_s16 (__a, __b);
23155 }
23156 
23157 __extension__ extern __inline int8x16_t
23158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23159 __arm_vqmovnbq (int8x16_t __a, int16x8_t __b)
23160 {
23161  return __arm_vqmovnbq_s16 (__a, __b);
23162 }
23163 
23164 __extension__ extern __inline int32x4_t
23165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23166 __arm_vqdmulltq (int16x8_t __a, int16x8_t __b)
23167 {
23168  return __arm_vqdmulltq_s16 (__a, __b);
23169 }
23170 
23171 __extension__ extern __inline int32x4_t
23172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23173 __arm_vqdmulltq (int16x8_t __a, int16_t __b)
23174 {
23175  return __arm_vqdmulltq_n_s16 (__a, __b);
23176 }
23177 
23178 __extension__ extern __inline int32x4_t
23179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23180 __arm_vqdmullbq (int16x8_t __a, int16x8_t __b)
23181 {
23182  return __arm_vqdmullbq_s16 (__a, __b);
23183 }
23184 
23185 __extension__ extern __inline int32x4_t
23186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23187 __arm_vqdmullbq (int16x8_t __a, int16_t __b)
23188 {
23189  return __arm_vqdmullbq_n_s16 (__a, __b);
23190 }
23191 
23192 __extension__ extern __inline int8x16_t
23193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23194 __arm_vmovntq (int8x16_t __a, int16x8_t __b)
23195 {
23196  return __arm_vmovntq_s16 (__a, __b);
23197 }
23198 
23199 __extension__ extern __inline int8x16_t
23200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23201 __arm_vmovnbq (int8x16_t __a, int16x8_t __b)
23202 {
23203  return __arm_vmovnbq_s16 (__a, __b);
23204 }
23205 
23206 __extension__ extern __inline int64_t
23207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23208 __arm_vmlsldavxq (int16x8_t __a, int16x8_t __b)
23209 {
23210  return __arm_vmlsldavxq_s16 (__a, __b);
23211 }
23212 
23213 __extension__ extern __inline int64_t
23214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23215 __arm_vmlsldavq (int16x8_t __a, int16x8_t __b)
23216 {
23217  return __arm_vmlsldavq_s16 (__a, __b);
23218 }
23219 
23220 __extension__ extern __inline int64_t
23221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23222 __arm_vmlaldavxq (int16x8_t __a, int16x8_t __b)
23223 {
23224  return __arm_vmlaldavxq_s16 (__a, __b);
23225 }
23226 
23227 __extension__ extern __inline int64_t
23228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23229 __arm_vmlaldavq (int16x8_t __a, int16x8_t __b)
23230 {
23231  return __arm_vmlaldavq_s16 (__a, __b);
23232 }
23233 
23234 __extension__ extern __inline int16x8_t
23235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23236 __arm_vshlltq (int8x16_t __a, const int __imm)
23237 {
23238  return __arm_vshlltq_n_s8 (__a, __imm);
23239 }
23240 
23241 __extension__ extern __inline int16x8_t
23242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23243 __arm_vshllbq (int8x16_t __a, const int __imm)
23244 {
23245  return __arm_vshllbq_n_s8 (__a, __imm);
23246 }
23247 
23248 __extension__ extern __inline int16x8_t
23249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23250 __arm_vorrq (int16x8_t __a, const int __imm)
23251 {
23252  return __arm_vorrq_n_s16 (__a, __imm);
23253 }
23254 
23255 __extension__ extern __inline int16x8_t
23256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23257 __arm_vbicq (int16x8_t __a, const int __imm)
23258 {
23259  return __arm_vbicq_n_s16 (__a, __imm);
23260 }
23261 
23262 __extension__ extern __inline uint16x8_t
23263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23264 __arm_vqmovntq (uint16x8_t __a, uint32x4_t __b)
23265 {
23266  return __arm_vqmovntq_u32 (__a, __b);
23267 }
23268 
23269 __extension__ extern __inline uint16x8_t
23270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23271 __arm_vqmovnbq (uint16x8_t __a, uint32x4_t __b)
23272 {
23273  return __arm_vqmovnbq_u32 (__a, __b);
23274 }
23275 
23276 __extension__ extern __inline uint32x4_t
23277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23278 __arm_vmulltq_poly (uint16x8_t __a, uint16x8_t __b)
23279 {
23280  return __arm_vmulltq_poly_p16 (__a, __b);
23281 }
23282 
23283 __extension__ extern __inline uint32x4_t
23284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23285 __arm_vmullbq_poly (uint16x8_t __a, uint16x8_t __b)
23286 {
23287  return __arm_vmullbq_poly_p16 (__a, __b);
23288 }
23289 
23290 __extension__ extern __inline uint16x8_t
23291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23292 __arm_vmovntq (uint16x8_t __a, uint32x4_t __b)
23293 {
23294  return __arm_vmovntq_u32 (__a, __b);
23295 }
23296 
23297 __extension__ extern __inline uint16x8_t
23298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23299 __arm_vmovnbq (uint16x8_t __a, uint32x4_t __b)
23300 {
23301  return __arm_vmovnbq_u32 (__a, __b);
23302 }
23303 
23304 __extension__ extern __inline uint64_t
23305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23306 __arm_vmlaldavq (uint32x4_t __a, uint32x4_t __b)
23307 {
23308  return __arm_vmlaldavq_u32 (__a, __b);
23309 }
23310 
23311 __extension__ extern __inline uint16x8_t
23312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23313 __arm_vqmovuntq (uint16x8_t __a, int32x4_t __b)
23314 {
23315  return __arm_vqmovuntq_s32 (__a, __b);
23316 }
23317 
23318 __extension__ extern __inline uint16x8_t
23319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23320 __arm_vqmovunbq (uint16x8_t __a, int32x4_t __b)
23321 {
23322  return __arm_vqmovunbq_s32 (__a, __b);
23323 }
23324 
23325 __extension__ extern __inline uint32x4_t
23326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23327 __arm_vshlltq (uint16x8_t __a, const int __imm)
23328 {
23329  return __arm_vshlltq_n_u16 (__a, __imm);
23330 }
23331 
23332 __extension__ extern __inline uint32x4_t
23333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23334 __arm_vshllbq (uint16x8_t __a, const int __imm)
23335 {
23336  return __arm_vshllbq_n_u16 (__a, __imm);
23337 }
23338 
23339 __extension__ extern __inline uint32x4_t
23340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23341 __arm_vorrq (uint32x4_t __a, const int __imm)
23342 {
23343  return __arm_vorrq_n_u32 (__a, __imm);
23344 }
23345 
23346 __extension__ extern __inline uint32x4_t
23347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23348 __arm_vbicq (uint32x4_t __a, const int __imm)
23349 {
23350  return __arm_vbicq_n_u32 (__a, __imm);
23351 }
23352 
23353 __extension__ extern __inline int16x8_t
23354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23355 __arm_vqmovntq (int16x8_t __a, int32x4_t __b)
23356 {
23357  return __arm_vqmovntq_s32 (__a, __b);
23358 }
23359 
23360 __extension__ extern __inline int16x8_t
23361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23362 __arm_vqmovnbq (int16x8_t __a, int32x4_t __b)
23363 {
23364  return __arm_vqmovnbq_s32 (__a, __b);
23365 }
23366 
23367 __extension__ extern __inline int64x2_t
23368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23369 __arm_vqdmulltq (int32x4_t __a, int32x4_t __b)
23370 {
23371  return __arm_vqdmulltq_s32 (__a, __b);
23372 }
23373 
23374 __extension__ extern __inline int64x2_t
23375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23376 __arm_vqdmulltq (int32x4_t __a, int32_t __b)
23377 {
23378  return __arm_vqdmulltq_n_s32 (__a, __b);
23379 }
23380 
23381 __extension__ extern __inline int64x2_t
23382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23383 __arm_vqdmullbq (int32x4_t __a, int32x4_t __b)
23384 {
23385  return __arm_vqdmullbq_s32 (__a, __b);
23386 }
23387 
23388 __extension__ extern __inline int64x2_t
23389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23390 __arm_vqdmullbq (int32x4_t __a, int32_t __b)
23391 {
23392  return __arm_vqdmullbq_n_s32 (__a, __b);
23393 }
23394 
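/* Illustrative sketch added for exposition (not part of the upstream header):
   the vqdmullbq/vqdmulltq wrappers above return a widened element type; for
   int32x4_t operands the bottom (even-numbered) and top (odd-numbered) lane
   pairs each produce an int64x2_t of saturating doubled products:

     void example_widen (int32x4_t a, int32x4_t b, int64x2_t *bot, int64x2_t *top)
     {
       *bot = __arm_vqdmullbq (a, b);   // even-numbered lanes, maps to __arm_vqdmullbq_s32
       *top = __arm_vqdmulltq (a, b);   // odd-numbered lanes, maps to __arm_vqdmulltq_s32
     }

   example_widen is a hypothetical name used only for this sketch.  */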
23395 __extension__ extern __inline int16x8_t
23396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23397 __arm_vmovntq (int16x8_t __a, int32x4_t __b)
23398 {
23399  return __arm_vmovntq_s32 (__a, __b);
23400 }
23401 
23402 __extension__ extern __inline int16x8_t
23403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23404 __arm_vmovnbq (int16x8_t __a, int32x4_t __b)
23405 {
23406  return __arm_vmovnbq_s32 (__a, __b);
23407 }
23408 
23409 __extension__ extern __inline int64_t
23410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23411 __arm_vmlsldavxq (int32x4_t __a, int32x4_t __b)
23412 {
23413  return __arm_vmlsldavxq_s32 (__a, __b);
23414 }
23415 
23416 __extension__ extern __inline int64_t
23417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23418 __arm_vmlsldavq (int32x4_t __a, int32x4_t __b)
23419 {
23420  return __arm_vmlsldavq_s32 (__a, __b);
23421 }
23422 
23423 __extension__ extern __inline int64_t
23424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23425 __arm_vmlaldavxq (int32x4_t __a, int32x4_t __b)
23426 {
23427  return __arm_vmlaldavxq_s32 (__a, __b);
23428 }
23429 
23430 __extension__ extern __inline int64_t
23431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23432 __arm_vmlaldavq (int32x4_t __a, int32x4_t __b)
23433 {
23434  return __arm_vmlaldavq_s32 (__a, __b);
23435 }
23436 
23437 __extension__ extern __inline int32x4_t
23438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23439 __arm_vshlltq (int16x8_t __a, const int __imm)
23440 {
23441  return __arm_vshlltq_n_s16 (__a, __imm);
23442 }
23443 
23444 __extension__ extern __inline int32x4_t
23445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23446 __arm_vshllbq (int16x8_t __a, const int __imm)
23447 {
23448  return __arm_vshllbq_n_s16 (__a, __imm);
23449 }
23450 
23451 __extension__ extern __inline int32x4_t
23452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23453 __arm_vorrq (int32x4_t __a, const int __imm)
23454 {
23455  return __arm_vorrq_n_s32 (__a, __imm);
23456 }
23457 
23458 __extension__ extern __inline int32x4_t
23459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23460 __arm_vbicq (int32x4_t __a, const int __imm)
23461 {
23462  return __arm_vbicq_n_s32 (__a, __imm);
23463 }
23464 
23465 __extension__ extern __inline uint64_t
23466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23467 __arm_vrmlaldavhq (uint32x4_t __a, uint32x4_t __b)
23468 {
23469  return __arm_vrmlaldavhq_u32 (__a, __b);
23470 }
23471 
23472 __extension__ extern __inline uint64_t
23473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23474 __arm_vaddlvaq (uint64_t __a, uint32x4_t __b)
23475 {
23476  return __arm_vaddlvaq_u32 (__a, __b);
23477 }
23478 
23479 __extension__ extern __inline int64_t
23480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23481 __arm_vrmlsldavhxq (int32x4_t __a, int32x4_t __b)
23482 {
23483  return __arm_vrmlsldavhxq_s32 (__a, __b);
23484 }
23485 
23486 __extension__ extern __inline int64_t
23487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23488 __arm_vrmlsldavhq (int32x4_t __a, int32x4_t __b)
23489 {
23490  return __arm_vrmlsldavhq_s32 (__a, __b);
23491 }
23492 
23493 __extension__ extern __inline int64_t
23494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23495 __arm_vrmlaldavhxq (int32x4_t __a, int32x4_t __b)
23496 {
23497  return __arm_vrmlaldavhxq_s32 (__a, __b);
23498 }
23499 
23500 __extension__ extern __inline int64_t
23501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23502 __arm_vrmlaldavhq (int32x4_t __a, int32x4_t __b)
23503 {
23504  return __arm_vrmlaldavhq_s32 (__a, __b);
23505 }
23506 
23507 __extension__ extern __inline int64_t
23508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23509 __arm_vaddlvaq (int64_t __a, int32x4_t __b)
23510 {
23511  return __arm_vaddlvaq_s32 (__a, __b);
23512 }
23513 
23514 __extension__ extern __inline uint32_t
23515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23516 __arm_vabavq (uint32_t __a, int8x16_t __b, int8x16_t __c)
23517 {
23518  return __arm_vabavq_s8 (__a, __b, __c);
23519 }
23520 
23521 __extension__ extern __inline uint32_t
23522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23523 __arm_vabavq (uint32_t __a, int16x8_t __b, int16x8_t __c)
23524 {
23525  return __arm_vabavq_s16 (__a, __b, __c);
23526 }
23527 
23528 __extension__ extern __inline uint32_t
23529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23530 __arm_vabavq (uint32_t __a, int32x4_t __b, int32x4_t __c)
23531 {
23532  return __arm_vabavq_s32 (__a, __b, __c);
23533 }
23534 
23535 __extension__ extern __inline uint32_t
23536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23537 __arm_vabavq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
23538 {
23539  return __arm_vabavq_u8 (__a, __b, __c);
23540 }
23541 
23542 __extension__ extern __inline uint32_t
23543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23544 __arm_vabavq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
23545 {
23546  return __arm_vabavq_u16 (__a, __b, __c);
23547 }
23548 
23549 __extension__ extern __inline uint32_t
23550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23551 __arm_vabavq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
23552 {
23553  return __arm_vabavq_u32 (__a, __b, __c);
23554 }
23555 
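/* Illustrative sketch added for exposition (not part of the upstream header):
   the vabavq overloads above accumulate the absolute differences of two
   vectors into a 32-bit scalar, the building block of a sum-of-absolute-
   differences kernel.  Assuming the vld1q_u8 load intrinsic declared
   elsewhere in this header is available:

     uint32_t example_sad16 (const uint8_t *p, const uint8_t *q)
     {
       uint8x16_t a = vld1q_u8 (p);
       uint8x16_t b = vld1q_u8 (q);
       return __arm_vabavq (0u, a, b);   // maps to __arm_vabavq_u8
     }

   example_sad16 is a hypothetical name used only for this sketch.  */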
23556 __extension__ extern __inline int16x8_t
23557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23558 __arm_vbicq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
23559 {
23560  return __arm_vbicq_m_n_s16 (__a, __imm, __p);
23561 }
23562 
23563 __extension__ extern __inline int32x4_t
23564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23565 __arm_vbicq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
23566 {
23567  return __arm_vbicq_m_n_s32 (__a, __imm, __p);
23568 }
23569 
23570 __extension__ extern __inline uint16x8_t
23571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23572 __arm_vbicq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
23573 {
23574  return __arm_vbicq_m_n_u16 (__a, __imm, __p);
23575 }
23576 
23577 __extension__ extern __inline uint32x4_t
23578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23579 __arm_vbicq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
23580 {
23581  return __arm_vbicq_m_n_u32 (__a, __imm, __p);
23582 }
23583 
23584 __extension__ extern __inline int8x16_t
23585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23586 __arm_vqrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
23587 {
23588  return __arm_vqrshrnbq_n_s16 (__a, __b, __imm);
23589 }
23590 
23591 __extension__ extern __inline uint8x16_t
23592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23593 __arm_vqrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
23594 {
23595  return __arm_vqrshrnbq_n_u16 (__a, __b, __imm);
23596 }
23597 
23598 __extension__ extern __inline int16x8_t
23599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23600 __arm_vqrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
23601 {
23602  return __arm_vqrshrnbq_n_s32 (__a, __b, __imm);
23603 }
23604 
23605 __extension__ extern __inline uint16x8_t
23606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23607 __arm_vqrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
23608 {
23609  return __arm_vqrshrnbq_n_u32 (__a, __b, __imm);
23610 }
23611 
23612 __extension__ extern __inline uint8x16_t
23613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23614 __arm_vqrshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
23615 {
23616  return __arm_vqrshrunbq_n_s16 (__a, __b, __imm);
23617 }
23618 
23619 __extension__ extern __inline uint16x8_t
23620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23621 __arm_vqrshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
23622 {
23623  return __arm_vqrshrunbq_n_s32 (__a, __b, __imm);
23624 }
23625 
23626 __extension__ extern __inline int64_t
23627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23628 __arm_vrmlaldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
23629 {
23630  return __arm_vrmlaldavhaq_s32 (__a, __b, __c);
23631 }
23632 
23633 __extension__ extern __inline uint64_t
23634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23635 __arm_vrmlaldavhaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
23636 {
23637  return __arm_vrmlaldavhaq_u32 (__a, __b, __c);
23638 }
23639 
23640 __extension__ extern __inline int8x16_t
23641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23642 __arm_vshlcq (int8x16_t __a, uint32_t * __b, const int __imm)
23643 {
23644  return __arm_vshlcq_s8 (__a, __b, __imm);
23645 }
23646 
23647 __extension__ extern __inline uint8x16_t
23648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23649 __arm_vshlcq (uint8x16_t __a, uint32_t * __b, const int __imm)
23650 {
23651  return __arm_vshlcq_u8 (__a, __b, __imm);
23652 }
23653 
23654 __extension__ extern __inline int16x8_t
23655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23656 __arm_vshlcq (int16x8_t __a, uint32_t * __b, const int __imm)
23657 {
23658  return __arm_vshlcq_s16 (__a, __b, __imm);
23659 }
23660 
23661 __extension__ extern __inline uint16x8_t
23662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23663 __arm_vshlcq (uint16x8_t __a, uint32_t * __b, const int __imm)
23664 {
23665  return __arm_vshlcq_u16 (__a, __b, __imm);
23666 }
23667 
23668 __extension__ extern __inline int32x4_t
23669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23670 __arm_vshlcq (int32x4_t __a, uint32_t * __b, const int __imm)
23671 {
23672  return __arm_vshlcq_s32 (__a, __b, __imm);
23673 }
23674 
23675 __extension__ extern __inline uint32x4_t
23676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23677 __arm_vshlcq (uint32x4_t __a, uint32_t * __b, const int __imm)
23678 {
23679  return __arm_vshlcq_u32 (__a, __b, __imm);
23680 }
23681 
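/* Illustrative sketch added for exposition (not part of the upstream header):
   the vshlcq wrappers above thread a carry value through the uint32_t that
   the second argument points to while shifting the vector by the immediate,
   so successive calls can chain the shifted-out bits, e.g.

     uint32x4_t example_shift_with_carry (uint32x4_t v)
     {
       uint32_t carry = 0;                            // carry-in for the shift
       uint32x4_t r = __arm_vshlcq (v, &carry, 4);    // maps to __arm_vshlcq_u32
       // carry now holds the shifted-out bits and can seed the next call.
       return r;
     }

   example_shift_with_carry is a hypothetical name used only for this sketch.  */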
23682 __extension__ extern __inline uint8x16_t
23683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23684 __arm_vpselq (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23685 {
23686  return __arm_vpselq_u8 (__a, __b, __p);
23687 }
23688 
23689 __extension__ extern __inline int8x16_t
23690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23691 __arm_vpselq (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23692 {
23693  return __arm_vpselq_s8 (__a, __b, __p);
23694 }
23695 
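/* Illustrative sketch added for exposition (not part of the upstream header):
   vpselq selects between its two vector operands lane by lane under an
   mve_pred16_t predicate (one bit per byte of the 128-bit vector), such as a
   predicate produced by the vcmp*_m comparison wrappers below:

     uint8x16_t example_select (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
     {
       // Lanes whose predicate bits are set come from a, the others from b.
       return __arm_vpselq (a, b, p);   // maps to __arm_vpselq_u8
     }

   example_select is a hypothetical name used only for this sketch.  */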
23696 __extension__ extern __inline uint8x16_t
23697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23698 __arm_vrev64q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
23699 {
23700  return __arm_vrev64q_m_u8 (__inactive, __a, __p);
23701 }
23702 
23703 __extension__ extern __inline uint8x16_t
23704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23705 __arm_vmvnq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
23706 {
23707  return __arm_vmvnq_m_u8 (__inactive, __a, __p);
23708 }
23709 
23710 __extension__ extern __inline uint8x16_t
23711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23712 __arm_vmlasq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
23713 {
23714  return __arm_vmlasq_n_u8 (__a, __b, __c);
23715 }
23716 
23717 __extension__ extern __inline uint8x16_t
23718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23719 __arm_vmlaq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
23720 {
23721  return __arm_vmlaq_n_u8 (__a, __b, __c);
23722 }
23723 
23724 __extension__ extern __inline uint32_t
23725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23726 __arm_vmladavq_p (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23727 {
23728  return __arm_vmladavq_p_u8 (__a, __b, __p);
23729 }
23730 
23731 __extension__ extern __inline uint32_t
23732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23733 __arm_vmladavaq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
23734 {
23735  return __arm_vmladavaq_u8 (__a, __b, __c);
23736 }
23737 
23738 __extension__ extern __inline uint8_t
23739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23740 __arm_vminvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
23741 {
23742  return __arm_vminvq_p_u8 (__a, __b, __p);
23743 }
23744 
23745 __extension__ extern __inline uint8_t
23746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23747 __arm_vmaxvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
23748 {
23749  return __arm_vmaxvq_p_u8 (__a, __b, __p);
23750 }
23751 
23752 __extension__ extern __inline uint8x16_t
23753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23754 __arm_vdupq_m (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
23755 {
23756  return __arm_vdupq_m_n_u8 (__inactive, __a, __p);
23757 }
23758 
23759 __extension__ extern __inline mve_pred16_t
23760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23761 __arm_vcmpneq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23762 {
23763  return __arm_vcmpneq_m_u8 (__a, __b, __p);
23764 }
23765 
23766 __extension__ extern __inline mve_pred16_t
23767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23768 __arm_vcmpneq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
23769 {
23770  return __arm_vcmpneq_m_n_u8 (__a, __b, __p);
23771 }
23772 
23773 __extension__ extern __inline mve_pred16_t
23774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23775 __arm_vcmphiq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23776 {
23777  return __arm_vcmphiq_m_u8 (__a, __b, __p);
23778 }
23779 
23780 __extension__ extern __inline mve_pred16_t
23781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23782 __arm_vcmphiq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
23783 {
23784  return __arm_vcmphiq_m_n_u8 (__a, __b, __p);
23785 }
23786 
23787 __extension__ extern __inline mve_pred16_t
23788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23789 __arm_vcmpeqq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23790 {
23791  return __arm_vcmpeqq_m_u8 (__a, __b, __p);
23792 }
23793 
23794 __extension__ extern __inline mve_pred16_t
23795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23796 __arm_vcmpeqq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
23797 {
23798  return __arm_vcmpeqq_m_n_u8 (__a, __b, __p);
23799 }
23800 
23801 __extension__ extern __inline mve_pred16_t
23802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23803 __arm_vcmpcsq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
23804 {
23805  return __arm_vcmpcsq_m_u8 (__a, __b, __p);
23806 }
23807 
23808 __extension__ extern __inline mve_pred16_t
23809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23810 __arm_vcmpcsq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
23811 {
23812  return __arm_vcmpcsq_m_n_u8 (__a, __b, __p);
23813 }
23814 
23815 __extension__ extern __inline uint8x16_t
23816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23817 __arm_vclzq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
23818 {
23819  return __arm_vclzq_m_u8 (__inactive, __a, __p);
23820 }
23821 
23822 __extension__ extern __inline uint32_t
23823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23824 __arm_vaddvaq_p (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
23825 {
23826  return __arm_vaddvaq_p_u8 (__a, __b, __p);
23827 }
23828 
23829 __extension__ extern __inline uint8x16_t
23830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23831 __arm_vsriq (uint8x16_t __a, uint8x16_t __b, const int __imm)
23832 {
23833  return __arm_vsriq_n_u8 (__a, __b, __imm);
23834 }
23835 
23836 __extension__ extern __inline uint8x16_t
23837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23838 __arm_vsliq (uint8x16_t __a, uint8x16_t __b, const int __imm)
23839 {
23840  return __arm_vsliq_n_u8 (__a, __b, __imm);
23841 }
23842 
23843 __extension__ extern __inline uint8x16_t
23844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23845 __arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
23846 {
23847  return __arm_vshlq_m_r_u8 (__a, __b, __p);
23848 }
23849 
23850 __extension__ extern __inline uint8x16_t
23851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23852 __arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
23853 {
23854  return __arm_vrshlq_m_n_u8 (__a, __b, __p);
23855 }
23856 
23857 __extension__ extern __inline uint8x16_t
23858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23859 __arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
23860 {
23861  return __arm_vqshlq_m_r_u8 (__a, __b, __p);
23862 }
23863 
23864 __extension__ extern __inline uint8x16_t
23865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23866 __arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
23867 {
23868  return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
23869 }
23870 
23871 __extension__ extern __inline uint8_t
23872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23873 __arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
23874 {
23875  return __arm_vminavq_p_s8 (__a, __b, __p);
23876 }
23877 
23878 __extension__ extern __inline uint8x16_t
23879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23880 __arm_vminaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23881 {
23882  return __arm_vminaq_m_s8 (__a, __b, __p);
23883 }
23884 
23885 __extension__ extern __inline uint8_t
23886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23887 __arm_vmaxavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
23888 {
23889  return __arm_vmaxavq_p_s8 (__a, __b, __p);
23890 }
23891 
23892 __extension__ extern __inline uint8x16_t
23893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23894 __arm_vmaxaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23895 {
23896  return __arm_vmaxaq_m_s8 (__a, __b, __p);
23897 }
23898 
23899 __extension__ extern __inline mve_pred16_t
23900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23901 __arm_vcmpneq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23902 {
23903  return __arm_vcmpneq_m_s8 (__a, __b, __p);
23904 }
23905 
23906 __extension__ extern __inline mve_pred16_t
23907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23908 __arm_vcmpneq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23909 {
23910  return __arm_vcmpneq_m_n_s8 (__a, __b, __p);
23911 }
23912 
23913 __extension__ extern __inline mve_pred16_t
23914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23915 __arm_vcmpltq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23916 {
23917  return __arm_vcmpltq_m_s8 (__a, __b, __p);
23918 }
23919 
23920 __extension__ extern __inline mve_pred16_t
23921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23922 __arm_vcmpltq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23923 {
23924  return __arm_vcmpltq_m_n_s8 (__a, __b, __p);
23925 }
23926 
23927 __extension__ extern __inline mve_pred16_t
23928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23929 __arm_vcmpleq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23930 {
23931  return __arm_vcmpleq_m_s8 (__a, __b, __p);
23932 }
23933 
23934 __extension__ extern __inline mve_pred16_t
23935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23936 __arm_vcmpleq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23937 {
23938  return __arm_vcmpleq_m_n_s8 (__a, __b, __p);
23939 }
23940 
23941 __extension__ extern __inline mve_pred16_t
23942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23943 __arm_vcmpgtq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23944 {
23945  return __arm_vcmpgtq_m_s8 (__a, __b, __p);
23946 }
23947 
23948 __extension__ extern __inline mve_pred16_t
23949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23950 __arm_vcmpgtq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23951 {
23952  return __arm_vcmpgtq_m_n_s8 (__a, __b, __p);
23953 }
23954 
23955 __extension__ extern __inline mve_pred16_t
23956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23957 __arm_vcmpgeq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23958 {
23959  return __arm_vcmpgeq_m_s8 (__a, __b, __p);
23960 }
23961 
23962 __extension__ extern __inline mve_pred16_t
23963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23964 __arm_vcmpgeq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23965 {
23966  return __arm_vcmpgeq_m_n_s8 (__a, __b, __p);
23967 }
23968 
23969 __extension__ extern __inline mve_pred16_t
23970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23971 __arm_vcmpeqq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
23972 {
23973  return __arm_vcmpeqq_m_s8 (__a, __b, __p);
23974 }
23975 
23976 __extension__ extern __inline mve_pred16_t
23977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23978 __arm_vcmpeqq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23979 {
23980  return __arm_vcmpeqq_m_n_s8 (__a, __b, __p);
23981 }
23982 
23983 __extension__ extern __inline int8x16_t
23984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23985 __arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
23986 {
23987  return __arm_vshlq_m_r_s8 (__a, __b, __p);
23988 }
23989 
23990 __extension__ extern __inline int8x16_t
23991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23992 __arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
23993 {
23994  return __arm_vrshlq_m_n_s8 (__a, __b, __p);
23995 }
23996 
23997 __extension__ extern __inline int8x16_t
23998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23999 __arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24000 {
24001  return __arm_vrev64q_m_s8 (__inactive, __a, __p);
24002 }
24003 
24004 __extension__ extern __inline int8x16_t
24005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24006 __arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
24007 {
24008  return __arm_vqshlq_m_r_s8 (__a, __b, __p);
24009 }
24010 
24011 __extension__ extern __inline int8x16_t
24012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24013 __arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
24014 {
24015  return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
24016 }
24017 
24018 __extension__ extern __inline int8x16_t
24019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24020 __arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24021 {
24022  return __arm_vqnegq_m_s8 (__inactive, __a, __p);
24023 }
24024 
24025 __extension__ extern __inline int8x16_t
24026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24027 __arm_vqabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24028 {
24029  return __arm_vqabsq_m_s8 (__inactive, __a, __p);
24030 }
24031 
24032 __extension__ extern __inline int8x16_t
24033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24034 __arm_vnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24035 {
24036  return __arm_vnegq_m_s8 (__inactive, __a, __p);
24037 }
24038 
24039 __extension__ extern __inline int8x16_t
24040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24041 __arm_vmvnq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24042 {
24043  return __arm_vmvnq_m_s8 (__inactive, __a, __p);
24044 }
24045 
24046 __extension__ extern __inline int32_t
24047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24048 __arm_vmlsdavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
24049 {
24050  return __arm_vmlsdavxq_p_s8 (__a, __b, __p);
24051 }
24052 
24053 __extension__ extern __inline int32_t
24054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24055 __arm_vmlsdavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
24056 {
24057  return __arm_vmlsdavq_p_s8 (__a, __b, __p);
24058 }
24059 
24060 __extension__ extern __inline int32_t
24061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24062 __arm_vmladavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
24063 {
24064  return __arm_vmladavxq_p_s8 (__a, __b, __p);
24065 }
24066 
24067 __extension__ extern __inline int32_t
24068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24069 __arm_vmladavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
24070 {
24071  return __arm_vmladavq_p_s8 (__a, __b, __p);
24072 }
24073 
24074 __extension__ extern __inline int8_t
24075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24076 __arm_vminvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
24077 {
24078  return __arm_vminvq_p_s8 (__a, __b, __p);
24079 }
24080 
24081 __extension__ extern __inline int8_t
24082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24083 __arm_vmaxvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
24084 {
24085  return __arm_vmaxvq_p_s8 (__a, __b, __p);
24086 }
24087 
24088 __extension__ extern __inline int8x16_t
24089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24090 __arm_vdupq_m (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
24091 {
24092  return __arm_vdupq_m_n_s8 (__inactive, __a, __p);
24093 }
24094 
24095 __extension__ extern __inline int8x16_t
24096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24097 __arm_vclzq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24098 {
24099  return __arm_vclzq_m_s8 (__inactive, __a, __p);
24100 }
24101 
24102 __extension__ extern __inline int8x16_t
24103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24104 __arm_vclsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24105 {
24106  return __arm_vclsq_m_s8 (__inactive, __a, __p);
24107 }
24108 
24109 __extension__ extern __inline int32_t
24110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24111 __arm_vaddvaq_p (int32_t __a, int8x16_t __b, mve_pred16_t __p)
24112 {
24113  return __arm_vaddvaq_p_s8 (__a, __b, __p);
24114 }
24115 
24116 __extension__ extern __inline int8x16_t
24117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24118 __arm_vabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
24119 {
24120  return __arm_vabsq_m_s8 (__inactive, __a, __p);
24121 }
24122 
24123 __extension__ extern __inline int8x16_t
24124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24125 __arm_vqrdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24126 {
24127  return __arm_vqrdmlsdhxq_s8 (__inactive, __a, __b);
24128 }
24129 
24130 __extension__ extern __inline int8x16_t
24131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24132 __arm_vqrdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24133 {
24134  return __arm_vqrdmlsdhq_s8 (__inactive, __a, __b);
24135 }
24136 
24137 __extension__ extern __inline int8x16_t
24138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24139 __arm_vqrdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
24140 {
24141  return __arm_vqrdmlashq_n_s8 (__a, __b, __c);
24142 }
24143 
24144 __extension__ extern __inline int8x16_t
24145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24146 __arm_vqdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
24147 {
24148  return __arm_vqdmlashq_n_s8 (__a, __b, __c);
24149 }
24150 
24151 __extension__ extern __inline int8x16_t
24152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24153 __arm_vqrdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
24154 {
24155  return __arm_vqrdmlahq_n_s8 (__a, __b, __c);
24156 }
24157 
24158 __extension__ extern __inline int8x16_t
24159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24160 __arm_vqrdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24161 {
24162  return __arm_vqrdmladhxq_s8 (__inactive, __a, __b);
24163 }
24164 
24165 __extension__ extern __inline int8x16_t
24166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24167 __arm_vqrdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24168 {
24169  return __arm_vqrdmladhq_s8 (__inactive, __a, __b);
24170 }
24171 
24172 __extension__ extern __inline int8x16_t
24173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24174 __arm_vqdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24175 {
24176  return __arm_vqdmlsdhxq_s8 (__inactive, __a, __b);
24177 }
24178 
24179 __extension__ extern __inline int8x16_t
24180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24181 __arm_vqdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24182 {
24183  return __arm_vqdmlsdhq_s8 (__inactive, __a, __b);
24184 }
24185 
24186 __extension__ extern __inline int8x16_t
24187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24188 __arm_vqdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
24189 {
24190  return __arm_vqdmlahq_n_s8 (__a, __b, __c);
24191 }
24192 
24193 __extension__ extern __inline int8x16_t
24194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24195 __arm_vqdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24196 {
24197  return __arm_vqdmladhxq_s8 (__inactive, __a, __b);
24198 }
24199 
24200 __extension__ extern __inline int8x16_t
24201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24202 __arm_vqdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
24203 {
24204  return __arm_vqdmladhq_s8 (__inactive, __a, __b);
24205 }
24206 
24207 __extension__ extern __inline int32_t
24208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24209 __arm_vmlsdavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
24210 {
24211  return __arm_vmlsdavaxq_s8 (__a, __b, __c);
24212 }
24213 
24214 __extension__ extern __inline int32_t
24215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24216 __arm_vmlsdavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
24217 {
24218  return __arm_vmlsdavaq_s8 (__a, __b, __c);
24219 }
24220 
24221 __extension__ extern __inline int8x16_t
24222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24223 __arm_vmlasq (int8x16_t __a, int8x16_t __b, int8_t __c)
24224 {
24225  return __arm_vmlasq_n_s8 (__a, __b, __c);
24226 }
24227 
24228 __extension__ extern __inline int8x16_t
24229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24230 __arm_vmlaq (int8x16_t __a, int8x16_t __b, int8_t __c)
24231 {
24232  return __arm_vmlaq_n_s8 (__a, __b, __c);
24233 }
24234 
24235 __extension__ extern __inline int32_t
24236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24237 __arm_vmladavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
24238 {
24239  return __arm_vmladavaxq_s8 (__a, __b, __c);
24240 }
24241 
24242 __extension__ extern __inline int32_t
24243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24244 __arm_vmladavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
24245 {
24246  return __arm_vmladavaq_s8 (__a, __b, __c);
24247 }
24248 
24249 __extension__ extern __inline int8x16_t
24250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24251 __arm_vsriq (int8x16_t __a, int8x16_t __b, const int __imm)
24252 {
24253  return __arm_vsriq_n_s8 (__a, __b, __imm);
24254 }
24255 
24256 __extension__ extern __inline int8x16_t
24257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24258 __arm_vsliq (int8x16_t __a, int8x16_t __b, const int __imm)
24259 {
24260  return __arm_vsliq_n_s8 (__a, __b, __imm);
24261 }
24262 
24263 __extension__ extern __inline uint16x8_t
24264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24265 __arm_vpselq (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24266 {
24267  return __arm_vpselq_u16 (__a, __b, __p);
24268 }
24269 
24270 __extension__ extern __inline int16x8_t
24271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24272 __arm_vpselq (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24273 {
24274  return __arm_vpselq_s16 (__a, __b, __p);
24275 }
24276 
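/* vpselq is a predicate-driven select: each byte of the result comes
   from __a where the corresponding bit of __p is set and from __b where
   it is clear, so it composes naturally with the vcmp*_m predicates
   produced below.  Sketch (illustrative names; assumes the unpredicated
   __arm_vcmpeqq_u16 defined elsewhere in this header):

     mve_pred16_t p = __arm_vcmpeqq_u16 (x, y);
     uint16x8_t r = __arm_vpselq (x, z, p);   // x where x == y, else z
 */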
24277 __extension__ extern __inline uint16x8_t
24278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24279 __arm_vrev64q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
24280 {
24281  return __arm_vrev64q_m_u16 (__inactive, __a, __p);
24282 }
24283 
24284 __extension__ extern __inline uint16x8_t
24285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24286 __arm_vmvnq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
24287 {
24288  return __arm_vmvnq_m_u16 (__inactive, __a, __p);
24289 }
24290 
24291 __extension__ extern __inline uint16x8_t
24292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24293 __arm_vmlasq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
24294 {
24295  return __arm_vmlasq_n_u16 (__a, __b, __c);
24296 }
24297 
24298 __extension__ extern __inline uint16x8_t
24299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24300 __arm_vmlaq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
24301 {
24302  return __arm_vmlaq_n_u16 (__a, __b, __c);
24303 }
24304 
24305 __extension__ extern __inline uint32_t
24306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24307 __arm_vmladavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24308 {
24309  return __arm_vmladavq_p_u16 (__a, __b, __p);
24310 }
24311 
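/* In the predicated reductions ("_p" forms such as __arm_vmladavq_p and
   __arm_vminvq_p), lanes with clear predicate bits simply do not
   contribute to the scalar result.  Sketch (illustrative names):

     uint32_t dot = __arm_vmladavq_p (a, b, p);  // sum of a[i] * b[i]
                                                 // over active lanes only
 */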
24312 __extension__ extern __inline uint32_t
24313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24314 __arm_vmladavaq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
24315 {
24316  return __arm_vmladavaq_u16 (__a, __b, __c);
24317 }
24318 
24319 __extension__ extern __inline uint16_t
24320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24321 __arm_vminvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
24322 {
24323  return __arm_vminvq_p_u16 (__a, __b, __p);
24324 }
24325 
24326 __extension__ extern __inline uint16_t
24327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24328 __arm_vmaxvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
24329 {
24330  return __arm_vmaxvq_p_u16 (__a, __b, __p);
24331 }
24332 
24333 __extension__ extern __inline uint16x8_t
24334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24335 __arm_vdupq_m (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
24336 {
24337  return __arm_vdupq_m_n_u16 (__inactive, __a, __p);
24338 }
24339 
24340 __extension__ extern __inline mve_pred16_t
24341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24342 __arm_vcmpneq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24343 {
24344  return __arm_vcmpneq_m_u16 (__a, __b, __p);
24345 }
24346 
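/* The predicated compares ("vcmp*_m") only test lanes whose predicate
   bits in __p are set; lanes that are predicated out yield zero bits in
   the returned mve_pred16_t, so the result can be ANDed into a running
   predicate or fed straight to vpselq.  */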
24347 __extension__ extern __inline mve_pred16_t
24348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24349 __arm_vcmpneq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
24350 {
24351  return __arm_vcmpneq_m_n_u16 (__a, __b, __p);
24352 }
24353 
24354 __extension__ extern __inline mve_pred16_t
24355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24356 __arm_vcmphiq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24357 {
24358  return __arm_vcmphiq_m_u16 (__a, __b, __p);
24359 }
24360 
24361 __extension__ extern __inline mve_pred16_t
24362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24363 __arm_vcmphiq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
24364 {
24365  return __arm_vcmphiq_m_n_u16 (__a, __b, __p);
24366 }
24367 
24368 __extension__ extern __inline mve_pred16_t
24369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24370 __arm_vcmpeqq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24371 {
24372  return __arm_vcmpeqq_m_u16 (__a, __b, __p);
24373 }
24374 
24375 __extension__ extern __inline mve_pred16_t
24376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24377 __arm_vcmpeqq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
24378 {
24379  return __arm_vcmpeqq_m_n_u16 (__a, __b, __p);
24380 }
24381 
24382 __extension__ extern __inline mve_pred16_t
24383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24384 __arm_vcmpcsq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
24385 {
24386  return __arm_vcmpcsq_m_u16 (__a, __b, __p);
24387 }
24388 
24389 __extension__ extern __inline mve_pred16_t
24390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24391 __arm_vcmpcsq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
24392 {
24393  return __arm_vcmpcsq_m_n_u16 (__a, __b, __p);
24394 }
24395 
24396 __extension__ extern __inline uint16x8_t
24397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24398 __arm_vclzq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
24399 {
24400  return __arm_vclzq_m_u16 (__inactive, __a, __p);
24401 }
24402 
24403 __extension__ extern __inline uint32_t
24404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24405 __arm_vaddvaq_p (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
24406 {
24407  return __arm_vaddvaq_p_u16 (__a, __b, __p);
24408 }
24409 
24410 __extension__ extern __inline uint16x8_t
24411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24412 __arm_vsriq (uint16x8_t __a, uint16x8_t __b, const int __imm)
24413 {
24414  return __arm_vsriq_n_u16 (__a, __b, __imm);
24415 }
24416 
24417 __extension__ extern __inline uint16x8_t
24418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24419 __arm_vsliq (uint16x8_t __a, uint16x8_t __b, const int __imm)
24420 {
24421  return __arm_vsliq_n_u16 (__a, __b, __imm);
24422 }
24423 
24424 __extension__ extern __inline uint16x8_t
24425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24426 __arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
24427 {
24428  return __arm_vshlq_m_r_u16 (__a, __b, __p);
24429 }
24430 
24431 __extension__ extern __inline uint16x8_t
24432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24433 __arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
24434 {
24435  return __arm_vrshlq_m_n_u16 (__a, __b, __p);
24436 }
24437 
24438 __extension__ extern __inline uint16x8_t
24439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24440 __arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
24441 {
24442  return __arm_vqshlq_m_r_u16 (__a, __b, __p);
24443 }
24444 
24445 __extension__ extern __inline uint16x8_t
24446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24447 __arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
24448 {
24449  return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
24450 }
24451 
24452 __extension__ extern __inline uint16_t
24453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24454 __arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
24455 {
24456  return __arm_vminavq_p_s16 (__a, __b, __p);
24457 }
24458 
24459 __extension__ extern __inline uint16x8_t
24460 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24461 __arm_vminaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24462 {
24463  return __arm_vminaq_m_s16 (__a, __b, __p);
24464 }
24465 
24466 __extension__ extern __inline uint16_t
24467 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24468 __arm_vmaxavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
24469 {
24470  return __arm_vmaxavq_p_s16 (__a, __b, __p);
24471 }
24472 
24473 __extension__ extern __inline uint16x8_t
24474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24475 __arm_vmaxaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24476 {
24477  return __arm_vmaxaq_m_s16 (__a, __b, __p);
24478 }
24479 
24480 __extension__ extern __inline mve_pred16_t
24481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24482 __arm_vcmpneq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24483 {
24484  return __arm_vcmpneq_m_s16 (__a, __b, __p);
24485 }
24486 
24487 __extension__ extern __inline mve_pred16_t
24488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24489 __arm_vcmpneq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24490 {
24491  return __arm_vcmpneq_m_n_s16 (__a, __b, __p);
24492 }
24493 
24494 __extension__ extern __inline mve_pred16_t
24495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24496 __arm_vcmpltq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24497 {
24498  return __arm_vcmpltq_m_s16 (__a, __b, __p);
24499 }
24500 
24501 __extension__ extern __inline mve_pred16_t
24502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24503 __arm_vcmpltq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24504 {
24505  return __arm_vcmpltq_m_n_s16 (__a, __b, __p);
24506 }
24507 
24508 __extension__ extern __inline mve_pred16_t
24509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24510 __arm_vcmpleq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24511 {
24512  return __arm_vcmpleq_m_s16 (__a, __b, __p);
24513 }
24514 
24515 __extension__ extern __inline mve_pred16_t
24516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24517 __arm_vcmpleq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24518 {
24519  return __arm_vcmpleq_m_n_s16 (__a, __b, __p);
24520 }
24521 
24522 __extension__ extern __inline mve_pred16_t
24523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24524 __arm_vcmpgtq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24525 {
24526  return __arm_vcmpgtq_m_s16 (__a, __b, __p);
24527 }
24528 
24529 __extension__ extern __inline mve_pred16_t
24530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24531 __arm_vcmpgtq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24532 {
24533  return __arm_vcmpgtq_m_n_s16 (__a, __b, __p);
24534 }
24535 
24536 __extension__ extern __inline mve_pred16_t
24537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24538 __arm_vcmpgeq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24539 {
24540  return __arm_vcmpgeq_m_s16 (__a, __b, __p);
24541 }
24542 
24543 __extension__ extern __inline mve_pred16_t
24544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24545 __arm_vcmpgeq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24546 {
24547  return __arm_vcmpgeq_m_n_s16 (__a, __b, __p);
24548 }
24549 
24550 __extension__ extern __inline mve_pred16_t
24551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24552 __arm_vcmpeqq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24553 {
24554  return __arm_vcmpeqq_m_s16 (__a, __b, __p);
24555 }
24556 
24557 __extension__ extern __inline mve_pred16_t
24558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24559 __arm_vcmpeqq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
24560 {
24561  return __arm_vcmpeqq_m_n_s16 (__a, __b, __p);
24562 }
24563 
24564 __extension__ extern __inline int16x8_t
24565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24566 __arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
24567 {
24568  return __arm_vshlq_m_r_s16 (__a, __b, __p);
24569 }
24570 
24571 __extension__ extern __inline int16x8_t
24572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24573 __arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
24574 {
24575  return __arm_vrshlq_m_n_s16 (__a, __b, __p);
24576 }
24577 
24578 __extension__ extern __inline int16x8_t
24579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24580 __arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24581 {
24582  return __arm_vrev64q_m_s16 (__inactive, __a, __p);
24583 }
24584 
24585 __extension__ extern __inline int16x8_t
24586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24587 __arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
24588 {
24589  return __arm_vqshlq_m_r_s16 (__a, __b, __p);
24590 }
24591 
24592 __extension__ extern __inline int16x8_t
24593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24594 __arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
24595 {
24596  return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
24597 }
24598 
24599 __extension__ extern __inline int16x8_t
24600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24601 __arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24602 {
24603  return __arm_vqnegq_m_s16 (__inactive, __a, __p);
24604 }
24605 
24606 __extension__ extern __inline int16x8_t
24607 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24608 __arm_vqabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24609 {
24610  return __arm_vqabsq_m_s16 (__inactive, __a, __p);
24611 }
24612 
24613 __extension__ extern __inline int16x8_t
24614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24615 __arm_vnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24616 {
24617  return __arm_vnegq_m_s16 (__inactive, __a, __p);
24618 }
24619 
24620 __extension__ extern __inline int16x8_t
24621 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24622 __arm_vmvnq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24623 {
24624  return __arm_vmvnq_m_s16 (__inactive, __a, __p);
24625 }
24626 
24627 __extension__ extern __inline int32_t
24628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24629 __arm_vmlsdavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24630 {
24631  return __arm_vmlsdavxq_p_s16 (__a, __b, __p);
24632 }
24633 
24634 __extension__ extern __inline int32_t
24635 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24636 __arm_vmlsdavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24637 {
24638  return __arm_vmlsdavq_p_s16 (__a, __b, __p);
24639 }
24640 
24641 __extension__ extern __inline int32_t
24642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24643 __arm_vmladavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24644 {
24645  return __arm_vmladavxq_p_s16 (__a, __b, __p);
24646 }
24647 
24648 __extension__ extern __inline int32_t
24649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24650 __arm_vmladavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
24651 {
24652  return __arm_vmladavq_p_s16 (__a, __b, __p);
24653 }
24654 
24655 __extension__ extern __inline int16_t
24656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24657 __arm_vminvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
24658 {
24659  return __arm_vminvq_p_s16 (__a, __b, __p);
24660 }
24661 
24662 __extension__ extern __inline int16_t
24663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24664 __arm_vmaxvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
24665 {
24666  return __arm_vmaxvq_p_s16 (__a, __b, __p);
24667 }
24668 
24669 __extension__ extern __inline int16x8_t
24670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24671 __arm_vdupq_m (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
24672 {
24673  return __arm_vdupq_m_n_s16 (__inactive, __a, __p);
24674 }
24675 
24676 __extension__ extern __inline int16x8_t
24677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24678 __arm_vclzq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24679 {
24680  return __arm_vclzq_m_s16 (__inactive, __a, __p);
24681 }
24682 
24683 __extension__ extern __inline int16x8_t
24684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24685 __arm_vclsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24686 {
24687  return __arm_vclsq_m_s16 (__inactive, __a, __p);
24688 }
24689 
24690 __extension__ extern __inline int32_t
24691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24692 __arm_vaddvaq_p (int32_t __a, int16x8_t __b, mve_pred16_t __p)
24693 {
24694  return __arm_vaddvaq_p_s16 (__a, __b, __p);
24695 }
24696 
24697 __extension__ extern __inline int16x8_t
24698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24699 __arm_vabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
24700 {
24701  return __arm_vabsq_m_s16 (__inactive, __a, __p);
24702 }
24703 
24704 __extension__ extern __inline int16x8_t
24705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24706 __arm_vqrdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24707 {
24708  return __arm_vqrdmlsdhxq_s16 (__inactive, __a, __b);
24709 }
24710 
24711 __extension__ extern __inline int16x8_t
24712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24713 __arm_vqrdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24714 {
24715  return __arm_vqrdmlsdhq_s16 (__inactive, __a, __b);
24716 }
24717 
24718 __extension__ extern __inline int16x8_t
24719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24720 __arm_vqrdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
24721 {
24722  return __arm_vqrdmlashq_n_s16 (__a, __b, __c);
24723 }
24724 
24725 __extension__ extern __inline int16x8_t
24726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24727 __arm_vqdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
24728 {
24729  return __arm_vqdmlashq_n_s16 (__a, __b, __c);
24730 }
24731 
24732 __extension__ extern __inline int16x8_t
24733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24734 __arm_vqrdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
24735 {
24736  return __arm_vqrdmlahq_n_s16 (__a, __b, __c);
24737 }
24738 
24739 __extension__ extern __inline int16x8_t
24740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24741 __arm_vqrdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24742 {
24743  return __arm_vqrdmladhxq_s16 (__inactive, __a, __b);
24744 }
24745 
24746 __extension__ extern __inline int16x8_t
24747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24748 __arm_vqrdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24749 {
24750  return __arm_vqrdmladhq_s16 (__inactive, __a, __b);
24751 }
24752 
24753 __extension__ extern __inline int16x8_t
24754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24755 __arm_vqdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24756 {
24757  return __arm_vqdmlsdhxq_s16 (__inactive, __a, __b);
24758 }
24759 
24760 __extension__ extern __inline int16x8_t
24761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24762 __arm_vqdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24763 {
24764  return __arm_vqdmlsdhq_s16 (__inactive, __a, __b);
24765 }
24766 
24767 __extension__ extern __inline int16x8_t
24768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24769 __arm_vqdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
24770 {
24771  return __arm_vqdmlahq_n_s16 (__a, __b, __c);
24772 }
24773 
24774 __extension__ extern __inline int16x8_t
24775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24776 __arm_vqdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24777 {
24778  return __arm_vqdmladhxq_s16 (__inactive, __a, __b);
24779 }
24780 
24781 __extension__ extern __inline int16x8_t
24782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24783 __arm_vqdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
24784 {
24785  return __arm_vqdmladhq_s16 (__inactive, __a, __b);
24786 }
24787 
24788 __extension__ extern __inline int32_t
24789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24790 __arm_vmlsdavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
24791 {
24792  return __arm_vmlsdavaxq_s16 (__a, __b, __c);
24793 }
24794 
24795 __extension__ extern __inline int32_t
24796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24797 __arm_vmlsdavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
24798 {
24799  return __arm_vmlsdavaq_s16 (__a, __b, __c);
24800 }
24801 
24802 __extension__ extern __inline int16x8_t
24803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24804 __arm_vmlasq (int16x8_t __a, int16x8_t __b, int16_t __c)
24805 {
24806  return __arm_vmlasq_n_s16 (__a, __b, __c);
24807 }
24808 
24809 __extension__ extern __inline int16x8_t
24810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24811 __arm_vmlaq (int16x8_t __a, int16x8_t __b, int16_t __c)
24812 {
24813  return __arm_vmlaq_n_s16 (__a, __b, __c);
24814 }
24815 
24816 __extension__ extern __inline int32_t
24817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24818 __arm_vmladavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
24819 {
24820  return __arm_vmladavaxq_s16 (__a, __b, __c);
24821 }
24822 
24823 __extension__ extern __inline int32_t
24824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24825 __arm_vmladavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
24826 {
24827  return __arm_vmladavaq_s16 (__a, __b, __c);
24828 }
24829 
24830 __extension__ extern __inline int16x8_t
24831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24832 __arm_vsriq (int16x8_t __a, int16x8_t __b, const int __imm)
24833 {
24834  return __arm_vsriq_n_s16 (__a, __b, __imm);
24835 }
24836 
24837 __extension__ extern __inline int16x8_t
24838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24839 __arm_vsliq (int16x8_t __a, int16x8_t __b, const int __imm)
24840 {
24841  return __arm_vsliq_n_s16 (__a, __b, __imm);
24842 }
24843 
24844 __extension__ extern __inline uint32x4_t
24845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24846 __arm_vpselq (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24847 {
24848  return __arm_vpselq_u32 (__a, __b, __p);
24849 }
24850 
24851 __extension__ extern __inline int32x4_t
24852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24853 __arm_vpselq (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
24854 {
24855  return __arm_vpselq_s32 (__a, __b, __p);
24856 }
24857 
24858 __extension__ extern __inline uint32x4_t
24859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24860 __arm_vrev64q_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
24861 {
24862  return __arm_vrev64q_m_u32 (__inactive, __a, __p);
24863 }
24864 
24865 __extension__ extern __inline uint32x4_t
24866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24867 __arm_vmvnq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
24868 {
24869  return __arm_vmvnq_m_u32 (__inactive, __a, __p);
24870 }
24871 
24872 __extension__ extern __inline uint32x4_t
24873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24874 __arm_vmlasq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
24875 {
24876  return __arm_vmlasq_n_u32 (__a, __b, __c);
24877 }
24878 
24879 __extension__ extern __inline uint32x4_t
24880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24881 __arm_vmlaq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
24882 {
24883  return __arm_vmlaq_n_u32 (__a, __b, __c);
24884 }
24885 
24886 __extension__ extern __inline uint32_t
24887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24888 __arm_vmladavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24889 {
24890  return __arm_vmladavq_p_u32 (__a, __b, __p);
24891 }
24892 
24893 __extension__ extern __inline uint32_t
24894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24895 __arm_vmladavaq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
24896 {
24897  return __arm_vmladavaq_u32 (__a, __b, __c);
24898 }
24899 
24900 __extension__ extern __inline uint32_t
24901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24902 __arm_vminvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
24903 {
24904  return __arm_vminvq_p_u32 (__a, __b, __p);
24905 }
24906 
24907 __extension__ extern __inline uint32_t
24908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24909 __arm_vmaxvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
24910 {
24911  return __arm_vmaxvq_p_u32 (__a, __b, __p);
24912 }
24913 
24914 __extension__ extern __inline uint32x4_t
24915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24916 __arm_vdupq_m (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
24917 {
24918  return __arm_vdupq_m_n_u32 (__inactive, __a, __p);
24919 }
24920 
24921 __extension__ extern __inline mve_pred16_t
24922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24923 __arm_vcmpneq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24924 {
24925  return __arm_vcmpneq_m_u32 (__a, __b, __p);
24926 }
24927 
24928 __extension__ extern __inline mve_pred16_t
24929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24930 __arm_vcmpneq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
24931 {
24932  return __arm_vcmpneq_m_n_u32 (__a, __b, __p);
24933 }
24934 
24935 __extension__ extern __inline mve_pred16_t
24936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24937 __arm_vcmphiq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24938 {
24939  return __arm_vcmphiq_m_u32 (__a, __b, __p);
24940 }
24941 
24942 __extension__ extern __inline mve_pred16_t
24943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24944 __arm_vcmphiq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
24945 {
24946  return __arm_vcmphiq_m_n_u32 (__a, __b, __p);
24947 }
24948 
24949 __extension__ extern __inline mve_pred16_t
24950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24951 __arm_vcmpeqq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24952 {
24953  return __arm_vcmpeqq_m_u32 (__a, __b, __p);
24954 }
24955 
24956 __extension__ extern __inline mve_pred16_t
24957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24958 __arm_vcmpeqq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
24959 {
24960  return __arm_vcmpeqq_m_n_u32 (__a, __b, __p);
24961 }
24962 
24963 __extension__ extern __inline mve_pred16_t
24964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24965 __arm_vcmpcsq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
24966 {
24967  return __arm_vcmpcsq_m_u32 (__a, __b, __p);
24968 }
24969 
24970 __extension__ extern __inline mve_pred16_t
24971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24972 __arm_vcmpcsq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
24973 {
24974  return __arm_vcmpcsq_m_n_u32 (__a, __b, __p);
24975 }
24976 
24977 __extension__ extern __inline uint32x4_t
24978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24979 __arm_vclzq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
24980 {
24981  return __arm_vclzq_m_u32 (__inactive, __a, __p);
24982 }
24983 
24984 __extension__ extern __inline uint32_t
24985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24986 __arm_vaddvaq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
24987 {
24988  return __arm_vaddvaq_p_u32 (__a, __b, __p);
24989 }
24990 
24991 __extension__ extern __inline uint32x4_t
24992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24993 __arm_vsriq (uint32x4_t __a, uint32x4_t __b, const int __imm)
24994 {
24995  return __arm_vsriq_n_u32 (__a, __b, __imm);
24996 }
24997 
24998 __extension__ extern __inline uint32x4_t
24999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25000 __arm_vsliq (uint32x4_t __a, uint32x4_t __b, const int __imm)
25001 {
25002  return __arm_vsliq_n_u32 (__a, __b, __imm);
25003 }
25004 
25005 __extension__ extern __inline uint32x4_t
25006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25007 __arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
25008 {
25009  return __arm_vshlq_m_r_u32 (__a, __b, __p);
25010 }
25011 
25012 __extension__ extern __inline uint32x4_t
25013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25014 __arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
25015 {
25016  return __arm_vrshlq_m_n_u32 (__a, __b, __p);
25017 }
25018 
25019 __extension__ extern __inline uint32x4_t
25020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25021 __arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
25022 {
25023  return __arm_vqshlq_m_r_u32 (__a, __b, __p);
25024 }
25025 
25026 __extension__ extern __inline uint32x4_t
25027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25028 __arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
25029 {
25030  return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
25031 }
25032 
25033 __extension__ extern __inline uint32_t
25034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25035 __arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
25036 {
25037  return __arm_vminavq_p_s32 (__a, __b, __p);
25038 }
25039 
25040 __extension__ extern __inline uint32x4_t
25041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25042 __arm_vminaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25043 {
25044  return __arm_vminaq_m_s32 (__a, __b, __p);
25045 }
25046 
25047 __extension__ extern __inline uint32_t
25048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25049 __arm_vmaxavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
25050 {
25051  return __arm_vmaxavq_p_s32 (__a, __b, __p);
25052 }
25053 
25054 __extension__ extern __inline uint32x4_t
25055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25056 __arm_vmaxaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25057 {
25058  return __arm_vmaxaq_m_s32 (__a, __b, __p);
25059 }
25060 
25061 __extension__ extern __inline mve_pred16_t
25062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25063 __arm_vcmpneq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25064 {
25065  return __arm_vcmpneq_m_s32 (__a, __b, __p);
25066 }
25067 
25068 __extension__ extern __inline mve_pred16_t
25069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25070 __arm_vcmpneq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25071 {
25072  return __arm_vcmpneq_m_n_s32 (__a, __b, __p);
25073 }
25074 
25075 __extension__ extern __inline mve_pred16_t
25076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25077 __arm_vcmpltq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25078 {
25079  return __arm_vcmpltq_m_s32 (__a, __b, __p);
25080 }
25081 
25082 __extension__ extern __inline mve_pred16_t
25083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25084 __arm_vcmpltq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25085 {
25086  return __arm_vcmpltq_m_n_s32 (__a, __b, __p);
25087 }
25088 
25089 __extension__ extern __inline mve_pred16_t
25090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25091 __arm_vcmpleq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25092 {
25093  return __arm_vcmpleq_m_s32 (__a, __b, __p);
25094 }
25095 
25096 __extension__ extern __inline mve_pred16_t
25097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25098 __arm_vcmpleq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25099 {
25100  return __arm_vcmpleq_m_n_s32 (__a, __b, __p);
25101 }
25102 
25103 __extension__ extern __inline mve_pred16_t
25104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25105 __arm_vcmpgtq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25106 {
25107  return __arm_vcmpgtq_m_s32 (__a, __b, __p);
25108 }
25109 
25110 __extension__ extern __inline mve_pred16_t
25111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25112 __arm_vcmpgtq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25113 {
25114  return __arm_vcmpgtq_m_n_s32 (__a, __b, __p);
25115 }
25116 
25117 __extension__ extern __inline mve_pred16_t
25118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25119 __arm_vcmpgeq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25120 {
25121  return __arm_vcmpgeq_m_s32 (__a, __b, __p);
25122 }
25123 
25124 __extension__ extern __inline mve_pred16_t
25125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25126 __arm_vcmpgeq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25127 {
25128  return __arm_vcmpgeq_m_n_s32 (__a, __b, __p);
25129 }
25130 
25131 __extension__ extern __inline mve_pred16_t
25132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25133 __arm_vcmpeqq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25134 {
25135  return __arm_vcmpeqq_m_s32 (__a, __b, __p);
25136 }
25137 
25138 __extension__ extern __inline mve_pred16_t
25139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25140 __arm_vcmpeqq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25141 {
25142  return __arm_vcmpeqq_m_n_s32 (__a, __b, __p);
25143 }
25144 
25145 __extension__ extern __inline int32x4_t
25146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25147 __arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25148 {
25149  return __arm_vshlq_m_r_s32 (__a, __b, __p);
25150 }
25151 
25152 __extension__ extern __inline int32x4_t
25153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25154 __arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25155 {
25156  return __arm_vrshlq_m_n_s32 (__a, __b, __p);
25157 }
25158 
25159 __extension__ extern __inline int32x4_t
25160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25161 __arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25162 {
25163  return __arm_vrev64q_m_s32 (__inactive, __a, __p);
25164 }
25165 
25166 __extension__ extern __inline int32x4_t
25167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25168 __arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25169 {
25170  return __arm_vqshlq_m_r_s32 (__a, __b, __p);
25171 }
25172 
25173 __extension__ extern __inline int32x4_t
25174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25175 __arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
25176 {
25177  return __arm_vqrshlq_m_n_s32 (__a, __b, __p);
25178 }
25179 
25180 __extension__ extern __inline int32x4_t
25181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25182 __arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25183 {
25184  return __arm_vqnegq_m_s32 (__inactive, __a, __p);
25185 }
25186 
25187 __extension__ extern __inline int32x4_t
25188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25189 __arm_vqabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25190 {
25191  return __arm_vqabsq_m_s32 (__inactive, __a, __p);
25192 }
25193 
25194 __extension__ extern __inline int32x4_t
25195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25196 __arm_vnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25197 {
25198  return __arm_vnegq_m_s32 (__inactive, __a, __p);
25199 }
25200 
25201 __extension__ extern __inline int32x4_t
25202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25203 __arm_vmvnq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25204 {
25205  return __arm_vmvnq_m_s32 (__inactive, __a, __p);
25206 }
25207 
25208 __extension__ extern __inline int32_t
25209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25210 __arm_vmlsdavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25211 {
25212  return __arm_vmlsdavxq_p_s32 (__a, __b, __p);
25213 }
25214 
25215 __extension__ extern __inline int32_t
25216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25217 __arm_vmlsdavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25218 {
25219  return __arm_vmlsdavq_p_s32 (__a, __b, __p);
25220 }
25221 
25222 __extension__ extern __inline int32_t
25223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25224 __arm_vmladavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25225 {
25226  return __arm_vmladavxq_p_s32 (__a, __b, __p);
25227 }
25228 
25229 __extension__ extern __inline int32_t
25230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25231 __arm_vmladavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25232 {
25233  return __arm_vmladavq_p_s32 (__a, __b, __p);
25234 }
25235 
25236 __extension__ extern __inline int32_t
25237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25238 __arm_vminvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
25239 {
25240  return __arm_vminvq_p_s32 (__a, __b, __p);
25241 }
25242 
25243 __extension__ extern __inline int32_t
25244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25245 __arm_vmaxvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
25246 {
25247  return __arm_vmaxvq_p_s32 (__a, __b, __p);
25248 }
25249 
25250 __extension__ extern __inline int32x4_t
25251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25252 __arm_vdupq_m (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
25253 {
25254  return __arm_vdupq_m_n_s32 (__inactive, __a, __p);
25255 }
25256 
25257 __extension__ extern __inline int32x4_t
25258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25259 __arm_vclzq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25260 {
25261  return __arm_vclzq_m_s32 (__inactive, __a, __p);
25262 }
25263 
25264 __extension__ extern __inline int32x4_t
25265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25266 __arm_vclsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25267 {
25268  return __arm_vclsq_m_s32 (__inactive, __a, __p);
25269 }
25270 
25271 __extension__ extern __inline int32_t
25272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25273 __arm_vaddvaq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
25274 {
25275  return __arm_vaddvaq_p_s32 (__a, __b, __p);
25276 }
25277 
25278 __extension__ extern __inline int32x4_t
25279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25280 __arm_vabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
25281 {
25282  return __arm_vabsq_m_s32 (__inactive, __a, __p);
25283 }
25284 
25285 __extension__ extern __inline int32x4_t
25286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25287 __arm_vqrdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25288 {
25289  return __arm_vqrdmlsdhxq_s32 (__inactive, __a, __b);
25290 }
25291 
25292 __extension__ extern __inline int32x4_t
25293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25294 __arm_vqrdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25295 {
25296  return __arm_vqrdmlsdhq_s32 (__inactive, __a, __b);
25297 }
25298 
25299 __extension__ extern __inline int32x4_t
25300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25301 __arm_vqrdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
25302 {
25303  return __arm_vqrdmlashq_n_s32 (__a, __b, __c);
25304 }
25305 
25306 __extension__ extern __inline int32x4_t
25307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25308 __arm_vqdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
25309 {
25310  return __arm_vqdmlashq_n_s32 (__a, __b, __c);
25311 }
25312 
25313 __extension__ extern __inline int32x4_t
25314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25315 __arm_vqrdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
25316 {
25317  return __arm_vqrdmlahq_n_s32 (__a, __b, __c);
25318 }
25319 
25320 __extension__ extern __inline int32x4_t
25321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25322 __arm_vqrdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25323 {
25324  return __arm_vqrdmladhxq_s32 (__inactive, __a, __b);
25325 }
25326 
25327 __extension__ extern __inline int32x4_t
25328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25329 __arm_vqrdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25330 {
25331  return __arm_vqrdmladhq_s32 (__inactive, __a, __b);
25332 }
25333 
25334 __extension__ extern __inline int32x4_t
25335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25336 __arm_vqdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25337 {
25338  return __arm_vqdmlsdhxq_s32 (__inactive, __a, __b);
25339 }
25340 
25341 __extension__ extern __inline int32x4_t
25342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25343 __arm_vqdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25344 {
25345  return __arm_vqdmlsdhq_s32 (__inactive, __a, __b);
25346 }
25347 
25348 __extension__ extern __inline int32x4_t
25349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25350 __arm_vqdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
25351 {
25352  return __arm_vqdmlahq_n_s32 (__a, __b, __c);
25353 }
25354 
25355 __extension__ extern __inline int32x4_t
25356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25357 __arm_vqdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25358 {
25359  return __arm_vqdmladhxq_s32 (__inactive, __a, __b);
25360 }
25361 
25362 __extension__ extern __inline int32x4_t
25363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25364 __arm_vqdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
25365 {
25366  return __arm_vqdmladhq_s32 (__inactive, __a, __b);
25367 }
25368 
25369 __extension__ extern __inline int32_t
25370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25371 __arm_vmlsdavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
25372 {
25373  return __arm_vmlsdavaxq_s32 (__a, __b, __c);
25374 }
25375 
25376 __extension__ extern __inline int32_t
25377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25378 __arm_vmlsdavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
25379 {
25380  return __arm_vmlsdavaq_s32 (__a, __b, __c);
25381 }
25382 
25383 __extension__ extern __inline int32x4_t
25384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25385 __arm_vmlasq (int32x4_t __a, int32x4_t __b, int32_t __c)
25386 {
25387  return __arm_vmlasq_n_s32 (__a, __b, __c);
25388 }
25389 
25390 __extension__ extern __inline int32x4_t
25391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25392 __arm_vmlaq (int32x4_t __a, int32x4_t __b, int32_t __c)
25393 {
25394  return __arm_vmlaq_n_s32 (__a, __b, __c);
25395 }
25396 
25397 __extension__ extern __inline int32_t
25398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25399 __arm_vmladavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
25400 {
25401  return __arm_vmladavaxq_s32 (__a, __b, __c);
25402 }
25403 
25404 __extension__ extern __inline int32_t
25405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25406 __arm_vmladavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
25407 {
25408  return __arm_vmladavaq_s32 (__a, __b, __c);
25409 }
25410 
25411 __extension__ extern __inline int32x4_t
25412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25413 __arm_vsriq (int32x4_t __a, int32x4_t __b, const int __imm)
25414 {
25415  return __arm_vsriq_n_s32 (__a, __b, __imm);
25416 }
25417 
25418 __extension__ extern __inline int32x4_t
25419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25420 __arm_vsliq (int32x4_t __a, int32x4_t __b, const int __imm)
25421 {
25422  return __arm_vsliq_n_s32 (__a, __b, __imm);
25423 }
25424 
25425 __extension__ extern __inline uint64x2_t
25426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25427 __arm_vpselq (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
25428 {
25429  return __arm_vpselq_u64 (__a, __b, __p);
25430 }
25431 
25432 __extension__ extern __inline int64x2_t
25433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25434 __arm_vpselq (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
25435 {
25436  return __arm_vpselq_s64 (__a, __b, __p);
25437 }
25438 
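/* Editorial note, not part of the original GCC header: vpselq is a
   predicated lane select -- result lanes come from __a where the
   corresponding predicate bits are set and from __b where they are clear,
   e.g.  int64x2_t r = vpselq (a, b, p);  (names illustrative).  */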
25439 __extension__ extern __inline int64_t
25440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25441 __arm_vrmlaldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25442 {
25443  return __arm_vrmlaldavhaxq_s32 (__a, __b, __c);
25444 }
25445 
25446 __extension__ extern __inline int64_t
25447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25448 __arm_vrmlsldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
25449 {
25450  return __arm_vrmlsldavhaq_s32 (__a, __b, __c);
25451 }
25452 
25453 __extension__ extern __inline int64_t
25454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25455 __arm_vrmlsldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25456 {
25457  return __arm_vrmlsldavhaxq_s32 (__a, __b, __c);
25458 }
25459 
25460 __extension__ extern __inline int64_t
25461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25462 __arm_vaddlvaq_p (int64_t __a, int32x4_t __b, mve_pred16_t __p)
25463 {
25464  return __arm_vaddlvaq_p_s32 (__a, __b, __p);
25465 }
25466 
25467 __extension__ extern __inline int8x16_t
25468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25469 __arm_vrev16q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
25470 {
25471  return __arm_vrev16q_m_s8 (__inactive, __a, __p);
25472 }
25473 
25474 __extension__ extern __inline int64_t
25475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25476 __arm_vrmlaldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25477 {
25478  return __arm_vrmlaldavhq_p_s32 (__a, __b, __p);
25479 }
25480 
25481 __extension__ extern __inline int64_t
25482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25483 __arm_vrmlaldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25484 {
25485  return __arm_vrmlaldavhxq_p_s32 (__a, __b, __p);
25486 }
25487 
25488 __extension__ extern __inline int64_t
25489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25490 __arm_vrmlsldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25491 {
25492  return __arm_vrmlsldavhq_p_s32 (__a, __b, __p);
25493 }
25494 
25495 __extension__ extern __inline int64_t
25496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25497 __arm_vrmlsldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25498 {
25499  return __arm_vrmlsldavhxq_p_s32 (__a, __b, __p);
25500 }
25501 
25502 __extension__ extern __inline uint64_t
25503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25504 __arm_vaddlvaq_p (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
25505 {
25506  return __arm_vaddlvaq_p_u32 (__a, __b, __p);
25507 }
25508 
25509 __extension__ extern __inline uint8x16_t
25510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25511 __arm_vrev16q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25512 {
25513  return __arm_vrev16q_m_u8 (__inactive, __a, __p);
25514 }
25515 
25516 __extension__ extern __inline uint64_t
25517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25518 __arm_vrmlaldavhq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
25519 {
25520  return __arm_vrmlaldavhq_p_u32 (__a, __b, __p);
25521 }
25522 
25523 __extension__ extern __inline int16x8_t
25524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25525 __arm_vmvnq_m (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
25526 {
25527  return __arm_vmvnq_m_n_s16 (__inactive, __imm, __p);
25528 }
25529 
25530 __extension__ extern __inline int16x8_t
25531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25532 __arm_vorrq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
25533 {
25534  return __arm_vorrq_m_n_s16 (__a, __imm, __p);
25535 }
25536 
25537 __extension__ extern __inline int8x16_t
25538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25539 __arm_vqrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
25540 {
25541  return __arm_vqrshrntq_n_s16 (__a, __b, __imm);
25542 }
25543 
25544 __extension__ extern __inline int8x16_t
25545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25546 __arm_vqshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
25547 {
25548  return __arm_vqshrnbq_n_s16 (__a, __b, __imm);
25549 }
25550 
25551 __extension__ extern __inline int8x16_t
25552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25553 __arm_vqshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
25554 {
25555  return __arm_vqshrntq_n_s16 (__a, __b, __imm);
25556 }
25557 
25558 __extension__ extern __inline int8x16_t
25559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25560 __arm_vrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
25561 {
25562  return __arm_vrshrnbq_n_s16 (__a, __b, __imm);
25563 }
25564 
25565 __extension__ extern __inline int8x16_t
25566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25567 __arm_vrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
25568 {
25569  return __arm_vrshrntq_n_s16 (__a, __b, __imm);
25570 }
25571 
25572 __extension__ extern __inline int8x16_t
25573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25574 __arm_vshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
25575 {
25576  return __arm_vshrnbq_n_s16 (__a, __b, __imm);
25577 }
25578 
25579 __extension__ extern __inline int8x16_t
25580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25581 __arm_vshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
25582 {
25583  return __arm_vshrntq_n_s16 (__a, __b, __imm);
25584 }
25585 
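/* Editorial note, not part of the original GCC header: these narrowing
   shifts write half-width results into alternating lanes of __a -- the
   "b" (bottom) forms fill the even-numbered lanes, the "t" (top) forms
   the odd-numbered lanes, so a fully narrowed vector is typically built
   in two steps (names illustrative):

     int8x16_t r = vshrnbq (r, lo, 4);   -- even lanes from lo >> 4
     r = vshrntq (r, hi, 4);             -- odd lanes from hi >> 4

   The q and r prefixes add saturation and rounding respectively.  */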
25586 __extension__ extern __inline int64_t
25587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25588 __arm_vmlaldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
25589 {
25590  return __arm_vmlaldavaq_s16 (__a, __b, __c);
25591 }
25592 
25593 __extension__ extern __inline int64_t
25594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25595 __arm_vmlaldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
25596 {
25597  return __arm_vmlaldavaxq_s16 (__a, __b, __c);
25598 }
25599 
25600 __extension__ extern __inline int64_t
25601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25602 __arm_vmlsldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
25603 {
25604  return __arm_vmlsldavaq_s16 (__a, __b, __c);
25605 }
25606 
25607 __extension__ extern __inline int64_t
25608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25609 __arm_vmlsldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
25610 {
25611  return __arm_vmlsldavaxq_s16 (__a, __b, __c);
25612 }
25613 
25614 __extension__ extern __inline int64_t
25615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25616 __arm_vmlaldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
25617 {
25618  return __arm_vmlaldavq_p_s16 (__a, __b, __p);
25619 }
25620 
25621 __extension__ extern __inline int64_t
25622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25623 __arm_vmlaldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
25624 {
25625  return __arm_vmlaldavxq_p_s16 (__a, __b, __p);
25626 }
25627 
25628 __extension__ extern __inline int64_t
25629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25630 __arm_vmlsldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
25631 {
25632  return __arm_vmlsldavq_p_s16 (__a, __b, __p);
25633 }
25634 
25635 __extension__ extern __inline int64_t
25636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25637 __arm_vmlsldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
25638 {
25639  return __arm_vmlsldavxq_p_s16 (__a, __b, __p);
25640 }
25641 
25642 __extension__ extern __inline int16x8_t
25643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25644 __arm_vmovlbq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
25645 {
25646  return __arm_vmovlbq_m_s8 (__inactive, __a, __p);
25647 }
25648 
25649 __extension__ extern __inline int16x8_t
25650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25651 __arm_vmovltq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
25652 {
25653  return __arm_vmovltq_m_s8 (__inactive, __a, __p);
25654 }
25655 
25656 __extension__ extern __inline int8x16_t
25657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25658 __arm_vmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25659 {
25660  return __arm_vmovnbq_m_s16 (__a, __b, __p);
25661 }
25662 
25663 __extension__ extern __inline int8x16_t
25664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25665 __arm_vmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25666 {
25667  return __arm_vmovntq_m_s16 (__a, __b, __p);
25668 }
25669 
25670 __extension__ extern __inline int8x16_t
25671 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25672 __arm_vqmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25673 {
25674  return __arm_vqmovnbq_m_s16 (__a, __b, __p);
25675 }
25676 
25677 __extension__ extern __inline int8x16_t
25678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25679 __arm_vqmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25680 {
25681  return __arm_vqmovntq_m_s16 (__a, __b, __p);
25682 }
25683 
25684 __extension__ extern __inline int8x16_t
25685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25686 __arm_vrev32q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
25687 {
25688  return __arm_vrev32q_m_s8 (__inactive, __a, __p);
25689 }
25690 
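/* Editorial note, not part of the original GCC header: in the _m
   ("merging") forms above, lanes whose predicate bit is clear are left
   unchanged, taken from __inactive (or from __a in the narrowing moves)
   rather than zeroed, e.g. (names illustrative):

     int16x8_t w = vmovlbq_m (inactive, v8, p);   -- widen even lanes of v8 where p is set
*/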
25691 __extension__ extern __inline uint16x8_t
25692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25693 __arm_vmvnq_m (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
25694 {
25695  return __arm_vmvnq_m_n_u16 (__inactive, __imm, __p);
25696 }
25697 
25698 __extension__ extern __inline uint16x8_t
25699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25700 __arm_vorrq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
25701 {
25702  return __arm_vorrq_m_n_u16 (__a, __imm, __p);
25703 }
25704 
25705 __extension__ extern __inline uint8x16_t
25706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25707 __arm_vqrshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
25708 {
25709  return __arm_vqrshruntq_n_s16 (__a, __b, __imm);
25710 }
25711 
25712 __extension__ extern __inline uint8x16_t
25713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25714 __arm_vqshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
25715 {
25716  return __arm_vqshrunbq_n_s16 (__a, __b, __imm);
25717 }
25718 
25719 __extension__ extern __inline uint8x16_t
25720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25721 __arm_vqshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
25722 {
25723  return __arm_vqshruntq_n_s16 (__a, __b, __imm);
25724 }
25725 
25726 __extension__ extern __inline uint8x16_t
25727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25728 __arm_vqmovunbq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25729 {
25730  return __arm_vqmovunbq_m_s16 (__a, __b, __p);
25731 }
25732 
25733 __extension__ extern __inline uint8x16_t
25734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25735 __arm_vqmovuntq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25736 {
25737  return __arm_vqmovuntq_m_s16 (__a, __b, __p);
25738 }
25739 
25740 __extension__ extern __inline uint8x16_t
25741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25742 __arm_vqrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25743 {
25744  return __arm_vqrshrntq_n_u16 (__a, __b, __imm);
25745 }
25746 
25747 __extension__ extern __inline uint8x16_t
25748 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25749 __arm_vqshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25750 {
25751  return __arm_vqshrnbq_n_u16 (__a, __b, __imm);
25752 }
25753 
25754 __extension__ extern __inline uint8x16_t
25755 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25756 __arm_vqshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25757 {
25758  return __arm_vqshrntq_n_u16 (__a, __b, __imm);
25759 }
25760 
25761 __extension__ extern __inline uint8x16_t
25762 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25763 __arm_vrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25764 {
25765  return __arm_vrshrnbq_n_u16 (__a, __b, __imm);
25766 }
25767 
25768 __extension__ extern __inline uint8x16_t
25769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25770 __arm_vrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25771 {
25772  return __arm_vrshrntq_n_u16 (__a, __b, __imm);
25773 }
25774 
25775 __extension__ extern __inline uint8x16_t
25776 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25777 __arm_vshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25778 {
25779  return __arm_vshrnbq_n_u16 (__a, __b, __imm);
25780 }
25781 
25782 __extension__ extern __inline uint8x16_t
25783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25784 __arm_vshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25785 {
25786  return __arm_vshrntq_n_u16 (__a, __b, __imm);
25787 }
25788 
25789 __extension__ extern __inline uint64_t
25790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25791 __arm_vmlaldavaq (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
25792 {
25793  return __arm_vmlaldavaq_u16 (__a, __b, __c);
25794 }
25795 
25796 __extension__ extern __inline uint64_t
25797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25798 __arm_vmlaldavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
25799 {
25800  return __arm_vmlaldavq_p_u16 (__a, __b, __p);
25801 }
25802 
25803 __extension__ extern __inline uint16x8_t
25804 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25805 __arm_vmovlbq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25806 {
25807  return __arm_vmovlbq_m_u8 (__inactive, __a, __p);
25808 }
25809 
25810 __extension__ extern __inline uint16x8_t
25811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25812 __arm_vmovltq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25813 {
25814  return __arm_vmovltq_m_u8 (__inactive, __a, __p);
25815 }
25816 
25817 __extension__ extern __inline uint8x16_t
25818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25819 __arm_vmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25820 {
25821  return __arm_vmovnbq_m_u16 (__a, __b, __p);
25822 }
25823 
25824 __extension__ extern __inline uint8x16_t
25825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25826 __arm_vmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25827 {
25828  return __arm_vmovntq_m_u16 (__a, __b, __p);
25829 }
25830 
25831 __extension__ extern __inline uint8x16_t
25832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25833 __arm_vqmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25834 {
25835  return __arm_vqmovnbq_m_u16 (__a, __b, __p);
25836 }
25837 
25838 __extension__ extern __inline uint8x16_t
25839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25840 __arm_vqmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25841 {
25842  return __arm_vqmovntq_m_u16 (__a, __b, __p);
25843 }
25844 
25845 __extension__ extern __inline uint8x16_t
25846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25847 __arm_vrev32q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25848 {
25849  return __arm_vrev32q_m_u8 (__inactive, __a, __p);
25850 }
25851 
25852 __extension__ extern __inline int32x4_t
25853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25854 __arm_vmvnq_m (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
25855 {
25856  return __arm_vmvnq_m_n_s32 (__inactive, __imm, __p);
25857 }
25858 
25859 __extension__ extern __inline int32x4_t
25860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25861 __arm_vorrq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
25862 {
25863  return __arm_vorrq_m_n_s32 (__a, __imm, __p);
25864 }
25865 
25866 __extension__ extern __inline int16x8_t
25867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25868 __arm_vqrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25869 {
25870  return __arm_vqrshrntq_n_s32 (__a, __b, __imm);
25871 }
25872 
25873 __extension__ extern __inline int16x8_t
25874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25875 __arm_vqshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25876 {
25877  return __arm_vqshrnbq_n_s32 (__a, __b, __imm);
25878 }
25879 
25880 __extension__ extern __inline int16x8_t
25881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25882 __arm_vqshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25883 {
25884  return __arm_vqshrntq_n_s32 (__a, __b, __imm);
25885 }
25886 
25887 __extension__ extern __inline int16x8_t
25888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25889 __arm_vrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25890 {
25891  return __arm_vrshrnbq_n_s32 (__a, __b, __imm);
25892 }
25893 
25894 __extension__ extern __inline int16x8_t
25895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25896 __arm_vrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25897 {
25898  return __arm_vrshrntq_n_s32 (__a, __b, __imm);
25899 }
25900 
25901 __extension__ extern __inline int16x8_t
25902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25903 __arm_vshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25904 {
25905  return __arm_vshrnbq_n_s32 (__a, __b, __imm);
25906 }
25907 
25908 __extension__ extern __inline int16x8_t
25909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25910 __arm_vshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25911 {
25912  return __arm_vshrntq_n_s32 (__a, __b, __imm);
25913 }
25914 
25915 __extension__ extern __inline int64_t
25916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25917 __arm_vmlaldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
25918 {
25919  return __arm_vmlaldavaq_s32 (__a, __b, __c);
25920 }
25921 
25922 __extension__ extern __inline int64_t
25923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25924 __arm_vmlaldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25925 {
25926  return __arm_vmlaldavaxq_s32 (__a, __b, __c);
25927 }
25928 
25929 __extension__ extern __inline int64_t
25930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25931 __arm_vmlsldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
25932 {
25933  return __arm_vmlsldavaq_s32 (__a, __b, __c);
25934 }
25935 
25936 __extension__ extern __inline int64_t
25937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25938 __arm_vmlsldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25939 {
25940  return __arm_vmlsldavaxq_s32 (__a, __b, __c);
25941 }
25942 
25943 __extension__ extern __inline int64_t
25944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25945 __arm_vmlaldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25946 {
25947  return __arm_vmlaldavq_p_s32 (__a, __b, __p);
25948 }
25949 
25950 __extension__ extern __inline int64_t
25951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25952 __arm_vmlaldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25953 {
25954  return __arm_vmlaldavxq_p_s32 (__a, __b, __p);
25955 }
25956 
25957 __extension__ extern __inline int64_t
25958 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25959 __arm_vmlsldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25960 {
25961  return __arm_vmlsldavq_p_s32 (__a, __b, __p);
25962 }
25963 
25964 __extension__ extern __inline int64_t
25965 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25966 __arm_vmlsldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25967 {
25968  return __arm_vmlsldavxq_p_s32 (__a, __b, __p);
25969 }
25970 
25971 __extension__ extern __inline int32x4_t
25972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25973 __arm_vmovlbq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
25974 {
25975  return __arm_vmovlbq_m_s16 (__inactive, __a, __p);
25976 }
25977 
25978 __extension__ extern __inline int32x4_t
25979 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25980 __arm_vmovltq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
25981 {
25982  return __arm_vmovltq_m_s16 (__inactive, __a, __p);
25983 }
25984 
25985 __extension__ extern __inline int16x8_t
25986 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25987 __arm_vmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
25988 {
25989  return __arm_vmovnbq_m_s32 (__a, __b, __p);
25990 }
25991 
25992 __extension__ extern __inline int16x8_t
25993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25994 __arm_vmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
25995 {
25996  return __arm_vmovntq_m_s32 (__a, __b, __p);
25997 }
25998 
25999 __extension__ extern __inline int16x8_t
26000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26001 __arm_vqmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26002 {
26003  return __arm_vqmovnbq_m_s32 (__a, __b, __p);
26004 }
26005 
26006 __extension__ extern __inline int16x8_t
26007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26008 __arm_vqmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26009 {
26010  return __arm_vqmovntq_m_s32 (__a, __b, __p);
26011 }
26012 
26013 __extension__ extern __inline int16x8_t
26014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26015 __arm_vrev32q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
26016 {
26017  return __arm_vrev32q_m_s16 (__inactive, __a, __p);
26018 }
26019 
26020 __extension__ extern __inline uint32x4_t
26021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26022 __arm_vmvnq_m (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
26023 {
26024  return __arm_vmvnq_m_n_u32 (__inactive, __imm, __p);
26025 }
26026 
26027 __extension__ extern __inline uint32x4_t
26028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26029 __arm_vorrq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
26030 {
26031  return __arm_vorrq_m_n_u32 (__a, __imm, __p);
26032 }
26033 
26034 __extension__ extern __inline uint16x8_t
26035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26036 __arm_vqrshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
26037 {
26038  return __arm_vqrshruntq_n_s32 (__a, __b, __imm);
26039 }
26040 
26041 __extension__ extern __inline uint16x8_t
26042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26043 __arm_vqshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
26044 {
26045  return __arm_vqshrunbq_n_s32 (__a, __b, __imm);
26046 }
26047 
26048 __extension__ extern __inline uint16x8_t
26049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26050 __arm_vqshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
26051 {
26052  return __arm_vqshruntq_n_s32 (__a, __b, __imm);
26053 }
26054 
26055 __extension__ extern __inline uint16x8_t
26056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26057 __arm_vqmovunbq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26058 {
26059  return __arm_vqmovunbq_m_s32 (__a, __b, __p);
26060 }
26061 
26062 __extension__ extern __inline uint16x8_t
26063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26064 __arm_vqmovuntq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26065 {
26066  return __arm_vqmovuntq_m_s32 (__a, __b, __p);
26067 }
26068 
26069 __extension__ extern __inline uint16x8_t
26070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26071 __arm_vqrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26072 {
26073  return __arm_vqrshrntq_n_u32 (__a, __b, __imm);
26074 }
26075 
26076 __extension__ extern __inline uint16x8_t
26077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26078 __arm_vqshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26079 {
26080  return __arm_vqshrnbq_n_u32 (__a, __b, __imm);
26081 }
26082 
26083 __extension__ extern __inline uint16x8_t
26084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26085 __arm_vqshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26086 {
26087  return __arm_vqshrntq_n_u32 (__a, __b, __imm);
26088 }
26089 
26090 __extension__ extern __inline uint16x8_t
26091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26092 __arm_vrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26093 {
26094  return __arm_vrshrnbq_n_u32 (__a, __b, __imm);
26095 }
26096 
26097 __extension__ extern __inline uint16x8_t
26098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26099 __arm_vrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26100 {
26101  return __arm_vrshrntq_n_u32 (__a, __b, __imm);
26102 }
26103 
26104 __extension__ extern __inline uint16x8_t
26105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26106 __arm_vshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26107 {
26108  return __arm_vshrnbq_n_u32 (__a, __b, __imm);
26109 }
26110 
26111 __extension__ extern __inline uint16x8_t
26112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26113 __arm_vshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26114 {
26115  return __arm_vshrntq_n_u32 (__a, __b, __imm);
26116 }
26117 
26118 __extension__ extern __inline uint64_t
26119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26120 __arm_vmlaldavaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
26121 {
26122  return __arm_vmlaldavaq_u32 (__a, __b, __c);
26123 }
26124 
26125 __extension__ extern __inline uint64_t
26126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26127 __arm_vmlaldavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26128 {
26129  return __arm_vmlaldavq_p_u32 (__a, __b, __p);
26130 }
26131 
26132 __extension__ extern __inline uint32x4_t
26133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26134 __arm_vmovlbq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26135 {
26136  return __arm_vmovlbq_m_u16 (__inactive, __a, __p);
26137 }
26138 
26139 __extension__ extern __inline uint32x4_t
26140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26141 __arm_vmovltq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26142 {
26143  return __arm_vmovltq_m_u16 (__inactive, __a, __p);
26144 }
26145 
26146 __extension__ extern __inline uint16x8_t
26147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26148 __arm_vmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26149 {
26150  return __arm_vmovnbq_m_u32 (__a, __b, __p);
26151 }
26152 
26153 __extension__ extern __inline uint16x8_t
26154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26155 __arm_vmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26156 {
26157  return __arm_vmovntq_m_u32 (__a, __b, __p);
26158 }
26159 
26160 __extension__ extern __inline uint16x8_t
26161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26162 __arm_vqmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26163 {
26164  return __arm_vqmovnbq_m_u32 (__a, __b, __p);
26165 }
26166 
26167 __extension__ extern __inline uint16x8_t
26168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26169 __arm_vqmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26170 {
26171  return __arm_vqmovntq_m_u32 (__a, __b, __p);
26172 }
26173 
26174 __extension__ extern __inline uint16x8_t
26175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26176 __arm_vrev32q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26177 {
26178  return __arm_vrev32q_m_u16 (__inactive, __a, __p);
26179 }
26180 
26181 __extension__ extern __inline int8x16_t
26182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26183 __arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
26184 {
26185  return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p);
26186 }
26187 
26188 __extension__ extern __inline int8x16_t
26189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26190 __arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26191 {
26192  return __arm_vsubq_m_s8 (__inactive, __a, __b, __p);
26193 }
26194 
26195 __extension__ extern __inline uint8x16_t
26196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26197 __arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
26198 {
26199  return __arm_vqshluq_m_n_s8 (__inactive, __a, __imm, __p);
26200 }
26201 
26202 __extension__ extern __inline uint32_t
26203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26204 __arm_vabavq_p (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
26205 {
26206  return __arm_vabavq_p_s8 (__a, __b, __c, __p);
26207 }
26208 
26209 __extension__ extern __inline uint8x16_t
26210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26211 __arm_vsriq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
26212 {
26213  return __arm_vsriq_m_n_u8 (__a, __b, __imm, __p);
26214 }
26215 
26216 __extension__ extern __inline uint8x16_t
26217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26218 __arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26219 {
26220  return __arm_vshlq_m_u8 (__inactive, __a, __b, __p);
26221 }
26222 
26223 __extension__ extern __inline uint8x16_t
26224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26225 __arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26226 {
26227  return __arm_vsubq_m_u8 (__inactive, __a, __b, __p);
26228 }
26229 
26230 __extension__ extern __inline uint32_t
26231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26232 __arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
26233 {
26234  return __arm_vabavq_p_u8 (__a, __b, __c, __p);
26235 }
26236 
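/* Editorial note, not part of the original GCC header: vabavq_p
   accumulates absolute differences into the scalar __a using only the
   predicated lanes, e.g. (names illustrative):

     uint32_t sad = vabavq_p (0u, x, y, vctp8q (n));   -- sum of |x[i]-y[i]| over the first n lanes
*/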
26237 __extension__ extern __inline int8x16_t
26238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26239 __arm_vshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26240 {
26241  return __arm_vshlq_m_s8 (__inactive, __a, __b, __p);
26242 }
26243 
26244 __extension__ extern __inline int16x8_t
26245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26246 __arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
26247 {
26248  return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p);
26249 }
26250 
26251 __extension__ extern __inline int16x8_t
26252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26253 __arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26254 {
26255  return __arm_vsubq_m_s16 (__inactive, __a, __b, __p);
26256 }
26257 
26258 __extension__ extern __inline uint16x8_t
26259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26260 __arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
26261 {
26262  return __arm_vqshluq_m_n_s16 (__inactive, __a, __imm, __p);
26263 }
26264 
26265 __extension__ extern __inline uint32_t
26266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26267 __arm_vabavq_p (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
26268 {
26269  return __arm_vabavq_p_s16 (__a, __b, __c, __p);
26270 }
26271 
26272 __extension__ extern __inline uint16x8_t
26273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26274 __arm_vsriq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
26275 {
26276  return __arm_vsriq_m_n_u16 (__a, __b, __imm, __p);
26277 }
26278 
26279 __extension__ extern __inline uint16x8_t
26280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26281 __arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26282 {
26283  return __arm_vshlq_m_u16 (__inactive, __a, __b, __p);
26284 }
26285 
26286 __extension__ extern __inline uint16x8_t
26287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26288 __arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26289 {
26290  return __arm_vsubq_m_u16 (__inactive, __a, __b, __p);
26291 }
26292 
26293 __extension__ extern __inline uint32_t
26294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26295 __arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
26296 {
26297  return __arm_vabavq_p_u16 (__a, __b, __c, __p);
26298 }
26299 
26300 __extension__ extern __inline int16x8_t
26301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26302 __arm_vshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26303 {
26304  return __arm_vshlq_m_s16 (__inactive, __a, __b, __p);
26305 }
26306 
26307 __extension__ extern __inline int32x4_t
26308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26309 __arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
26310 {
26311  return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p);
26312 }
26313 
26314 __extension__ extern __inline int32x4_t
26315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26316 __arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26317 {
26318  return __arm_vsubq_m_s32 (__inactive, __a, __b, __p);
26319 }
26320 
26321 __extension__ extern __inline uint32x4_t
26322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26323 __arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
26324 {
26325  return __arm_vqshluq_m_n_s32 (__inactive, __a, __imm, __p);
26326 }
26327 
26328 __extension__ extern __inline uint32_t
26329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26330 __arm_vabavq_p (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
26331 {
26332  return __arm_vabavq_p_s32 (__a, __b, __c, __p);
26333 }
26334 
26335 __extension__ extern __inline uint32x4_t
26336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26337 __arm_vsriq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
26338 {
26339  return __arm_vsriq_m_n_u32 (__a, __b, __imm, __p);
26340 }
26341 
26342 __extension__ extern __inline uint32x4_t
26343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26344 __arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26345 {
26346  return __arm_vshlq_m_u32 (__inactive, __a, __b, __p);
26347 }
26348 
26349 __extension__ extern __inline uint32x4_t
26350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26351 __arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26352 {
26353  return __arm_vsubq_m_u32 (__inactive, __a, __b, __p);
26354 }
26355 
26356 __extension__ extern __inline uint32_t
26357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26358 __arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
26359 {
26360  return __arm_vabavq_p_u32 (__a, __b, __c, __p);
26361 }
26362 
26363 __extension__ extern __inline int32x4_t
26364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26365 __arm_vshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26366 {
26367  return __arm_vshlq_m_s32 (__inactive, __a, __b, __p);
26368 }
26369 
26370 __extension__ extern __inline int8x16_t
26371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26372 __arm_vabdq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26373 {
26374  return __arm_vabdq_m_s8 (__inactive, __a, __b, __p);
26375 }
26376 
26377 __extension__ extern __inline int32x4_t
26378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26379 __arm_vabdq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26380 {
26381  return __arm_vabdq_m_s32 (__inactive, __a, __b, __p);
26382 }
26383 
26384 __extension__ extern __inline int16x8_t
26385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26386 __arm_vabdq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26387 {
26388  return __arm_vabdq_m_s16 (__inactive, __a, __b, __p);
26389 }
26390 
26391 __extension__ extern __inline uint8x16_t
26392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26393 __arm_vabdq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26394 {
26395  return __arm_vabdq_m_u8 (__inactive, __a, __b, __p);
26396 }
26397 
26398 __extension__ extern __inline uint32x4_t
26399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26400 __arm_vabdq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26401 {
26402  return __arm_vabdq_m_u32 (__inactive, __a, __b, __p);
26403 }
26404 
26405 __extension__ extern __inline uint16x8_t
26406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26407 __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26408 {
26409  return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
26410 }
26411 
26412 __extension__ extern __inline int8x16_t
26413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26414 __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
26415 {
26416  return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p);
26417 }
26418 
26419 __extension__ extern __inline int32x4_t
26420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26421 __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
26422 {
26423  return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p);
26424 }
26425 
26426 __extension__ extern __inline int16x8_t
26427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26428 __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
26429 {
26430  return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p);
26431 }
26432 
26433 __extension__ extern __inline uint8x16_t
26434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26435 __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
26436 {
26437  return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p);
26438 }
26439 
26440 __extension__ extern __inline uint32x4_t
26441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26442 __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
26443 {
26444  return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p);
26445 }
26446 
26447 __extension__ extern __inline uint16x8_t
26448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26449 __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
26450 {
26451  return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p);
26452 }
26453 
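/* Editorial note, not part of the original GCC header: vaddq_m is
   overloaded on its third argument -- a plain integer selects the _n
   (vector plus scalar) forms above, while a vector argument selects the
   vector-plus-vector forms that follow, e.g. (names illustrative):

     int32x4_t r1 = vaddq_m (inact, v, 5, p);   -- v[i] + 5 in enabled lanes
     int32x4_t r2 = vaddq_m (inact, v, w, p);   -- v[i] + w[i] in enabled lanes
*/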
26454 __extension__ extern __inline int8x16_t
26455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26456 __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26457 {
26458  return __arm_vaddq_m_s8 (__inactive, __a, __b, __p);
26459 }
26460 
26461 __extension__ extern __inline int32x4_t
26462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26463 __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26464 {
26465  return __arm_vaddq_m_s32 (__inactive, __a, __b, __p);
26466 }
26467 
26468 __extension__ extern __inline int16x8_t
26469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26470 __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26471 {
26472  return __arm_vaddq_m_s16 (__inactive, __a, __b, __p);
26473 }
26474 
26475 __extension__ extern __inline uint8x16_t
26476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26477 __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26478 {
26479  return __arm_vaddq_m_u8 (__inactive, __a, __b, __p);
26480 }
26481 
26482 __extension__ extern __inline uint32x4_t
26483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26484 __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26485 {
26486  return __arm_vaddq_m_u32 (__inactive, __a, __b, __p);
26487 }
26488 
26489 __extension__ extern __inline uint16x8_t
26490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26491 __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26492 {
26493  return __arm_vaddq_m_u16 (__inactive, __a, __b, __p);
26494 }
26495 
26496 __extension__ extern __inline int8x16_t
26497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26498 __arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26499 {
26500  return __arm_vandq_m_s8 (__inactive, __a, __b, __p);
26501 }
26502 
26503 __extension__ extern __inline int32x4_t
26504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26505 __arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26506 {
26507  return __arm_vandq_m_s32 (__inactive, __a, __b, __p);
26508 }
26509 
26510 __extension__ extern __inline int16x8_t
26511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26512 __arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26513 {
26514  return __arm_vandq_m_s16 (__inactive, __a, __b, __p);
26515 }
26516 
26517 __extension__ extern __inline uint8x16_t
26518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26519 __arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26520 {
26521  return __arm_vandq_m_u8 (__inactive, __a, __b, __p);
26522 }
26523 
26524 __extension__ extern __inline uint32x4_t
26525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26526 __arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26527 {
26528  return __arm_vandq_m_u32 (__inactive, __a, __b, __p);
26529 }
26530 
26531 __extension__ extern __inline uint16x8_t
26532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26533 __arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26534 {
26535  return __arm_vandq_m_u16 (__inactive, __a, __b, __p);
26536 }
26537 
26538 __extension__ extern __inline int8x16_t
26539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26540 __arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26541 {
26542  return __arm_vbicq_m_s8 (__inactive, __a, __b, __p);
26543 }
26544 
26545 __extension__ extern __inline int32x4_t
26546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26547 __arm_vbicq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26548 {
26549  return __arm_vbicq_m_s32 (__inactive, __a, __b, __p);
26550 }
26551 
26552 __extension__ extern __inline int16x8_t
26553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26554 __arm_vbicq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26555 {
26556  return __arm_vbicq_m_s16 (__inactive, __a, __b, __p);
26557 }
26558 
26559 __extension__ extern __inline uint8x16_t
26560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26561 __arm_vbicq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26562 {
26563  return __arm_vbicq_m_u8 (__inactive, __a, __b, __p);
26564 }
26565 
26566 __extension__ extern __inline uint32x4_t
26567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26568 __arm_vbicq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26569 {
26570  return __arm_vbicq_m_u32 (__inactive, __a, __b, __p);
26571 }
26572 
26573 __extension__ extern __inline uint16x8_t
26574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26575 __arm_vbicq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26576 {
26577  return __arm_vbicq_m_u16 (__inactive, __a, __b, __p);
26578 }
26579 
26580 __extension__ extern __inline int8x16_t
26581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26582 __arm_vbrsrq_m (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
26583 {
26584  return __arm_vbrsrq_m_n_s8 (__inactive, __a, __b, __p);
26585 }
26586 
26587 __extension__ extern __inline int32x4_t
26588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26589 __arm_vbrsrq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
26590 {
26591  return __arm_vbrsrq_m_n_s32 (__inactive, __a, __b, __p);
26592 }
26593 
26594 __extension__ extern __inline int16x8_t
26595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26596 __arm_vbrsrq_m (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
26597 {
26598  return __arm_vbrsrq_m_n_s16 (__inactive, __a, __b, __p);
26599 }
26600 
26601 __extension__ extern __inline uint8x16_t
26602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26603 __arm_vbrsrq_m (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
26604 {
26605  return __arm_vbrsrq_m_n_u8 (__inactive, __a, __b, __p);
26606 }
26607 
26608 __extension__ extern __inline uint32x4_t
26609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26610 __arm_vbrsrq_m (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
26611 {
26612  return __arm_vbrsrq_m_n_u32 (__inactive, __a, __b, __p);
26613 }
26614 
26615 __extension__ extern __inline uint16x8_t
26616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26617 __arm_vbrsrq_m (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
26618 {
26619  return __arm_vbrsrq_m_n_u16 (__inactive, __a, __b, __p);
26620 }
26621 
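/* vcaddq_rot90_m / vcaddq_rot270_m: predicated complex addition with the
   second operand rotated by 90 or 270 degrees, treating each pair of
   adjacent lanes as the real and imaginary parts of a complex value.  */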
26622 __extension__ extern __inline int8x16_t
26623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26624 __arm_vcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26625 {
26626  return __arm_vcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
26627 }
26628 
26629 __extension__ extern __inline int32x4_t
26630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26631 __arm_vcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26632 {
26633  return __arm_vcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
26634 }
26635 
26636 __extension__ extern __inline int16x8_t
26637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26638 __arm_vcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26639 {
26640  return __arm_vcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
26641 }
26642 
26643 __extension__ extern __inline uint8x16_t
26644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26645 __arm_vcaddq_rot270_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26646 {
26647  return __arm_vcaddq_rot270_m_u8 (__inactive, __a, __b, __p);
26648 }
26649 
26650 __extension__ extern __inline uint32x4_t
26651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26652 __arm_vcaddq_rot270_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26653 {
26654  return __arm_vcaddq_rot270_m_u32 (__inactive, __a, __b, __p);
26655 }
26656 
26657 __extension__ extern __inline uint16x8_t
26658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26659 __arm_vcaddq_rot270_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26660 {
26661  return __arm_vcaddq_rot270_m_u16 (__inactive, __a, __b, __p);
26662 }
26663 
26664 __extension__ extern __inline int8x16_t
26665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26666 __arm_vcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26667 {
26668  return __arm_vcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
26669 }
26670 
26671 __extension__ extern __inline int32x4_t
26672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26673 __arm_vcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26674 {
26675  return __arm_vcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
26676 }
26677 
26678 __extension__ extern __inline int16x8_t
26679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26680 __arm_vcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26681 {
26682  return __arm_vcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
26683 }
26684 
26685 __extension__ extern __inline uint8x16_t
26686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26687 __arm_vcaddq_rot90_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26688 {
26689  return __arm_vcaddq_rot90_m_u8 (__inactive, __a, __b, __p);
26690 }
26691 
26692 __extension__ extern __inline uint32x4_t
26693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26694 __arm_vcaddq_rot90_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26695 {
26696  return __arm_vcaddq_rot90_m_u32 (__inactive, __a, __b, __p);
26697 }
26698 
26699 __extension__ extern __inline uint16x8_t
26700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26701 __arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26702 {
26703  return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p);
26704 }
26705 
26706 __extension__ extern __inline int8x16_t
26707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26708 __arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26709 {
26710  return __arm_veorq_m_s8 (__inactive, __a, __b, __p);
26711 }
26712 
26713 __extension__ extern __inline int32x4_t
26714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26715 __arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26716 {
26717  return __arm_veorq_m_s32 (__inactive, __a, __b, __p);
26718 }
26719 
26720 __extension__ extern __inline int16x8_t
26721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26722 __arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26723 {
26724  return __arm_veorq_m_s16 (__inactive, __a, __b, __p);
26725 }
26726 
26727 __extension__ extern __inline uint8x16_t
26728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26729 __arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26730 {
26731  return __arm_veorq_m_u8 (__inactive, __a, __b, __p);
26732 }
26733 
26734 __extension__ extern __inline uint32x4_t
26735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26736 __arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26737 {
26738  return __arm_veorq_m_u32 (__inactive, __a, __b, __p);
26739 }
26740 
26741 __extension__ extern __inline uint16x8_t
26742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26743 __arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26744 {
26745  return __arm_veorq_m_u16 (__inactive, __a, __b, __p);
26746 }
26747 
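/* vhaddq_m (and, further below, vhsubq_m): predicated halving add and
   subtract; each active result lane is (__a + __b) >> 1 or (__a - __b) >> 1
   with no intermediate overflow.  The vhcaddq_rot*_m overloads in between
   are the halving forms of the rotated complex add.  */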
26748 __extension__ extern __inline int8x16_t
26749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26750 __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
26751 {
26752  return __arm_vhaddq_m_n_s8 (__inactive, __a, __b, __p);
26753 }
26754 
26755 __extension__ extern __inline int32x4_t
26756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26757 __arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
26758 {
26759  return __arm_vhaddq_m_n_s32 (__inactive, __a, __b, __p);
26760 }
26761 
26762 __extension__ extern __inline int16x8_t
26763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26764 __arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
26765 {
26766  return __arm_vhaddq_m_n_s16 (__inactive, __a, __b, __p);
26767 }
26768 
26769 __extension__ extern __inline uint8x16_t
26770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26771 __arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
26772 {
26773  return __arm_vhaddq_m_n_u8 (__inactive, __a, __b, __p);
26774 }
26775 
26776 __extension__ extern __inline uint32x4_t
26777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26778 __arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
26779 {
26780  return __arm_vhaddq_m_n_u32 (__inactive, __a, __b, __p);
26781 }
26782 
26783 __extension__ extern __inline uint16x8_t
26784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26785 __arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
26786 {
26787  return __arm_vhaddq_m_n_u16 (__inactive, __a, __b, __p);
26788 }
26789 
26790 __extension__ extern __inline int8x16_t
26791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26792 __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26793 {
26794  return __arm_vhaddq_m_s8 (__inactive, __a, __b, __p);
26795 }
26796 
26797 __extension__ extern __inline int32x4_t
26798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26799 __arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26800 {
26801  return __arm_vhaddq_m_s32 (__inactive, __a, __b, __p);
26802 }
26803 
26804 __extension__ extern __inline int16x8_t
26805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26806 __arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26807 {
26808  return __arm_vhaddq_m_s16 (__inactive, __a, __b, __p);
26809 }
26810 
26811 __extension__ extern __inline uint8x16_t
26812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26813 __arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26814 {
26815  return __arm_vhaddq_m_u8 (__inactive, __a, __b, __p);
26816 }
26817 
26818 __extension__ extern __inline uint32x4_t
26819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26820 __arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26821 {
26822  return __arm_vhaddq_m_u32 (__inactive, __a, __b, __p);
26823 }
26824 
26825 __extension__ extern __inline uint16x8_t
26826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26827 __arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26828 {
26829  return __arm_vhaddq_m_u16 (__inactive, __a, __b, __p);
26830 }
26831 
26832 __extension__ extern __inline int8x16_t
26833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26834 __arm_vhcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26835 {
26836  return __arm_vhcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
26837 }
26838 
26839 __extension__ extern __inline int32x4_t
26840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26841 __arm_vhcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26842 {
26843  return __arm_vhcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
26844 }
26845 
26846 __extension__ extern __inline int16x8_t
26847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26848 __arm_vhcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26849 {
26850  return __arm_vhcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
26851 }
26852 
26853 __extension__ extern __inline int8x16_t
26854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26855 __arm_vhcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26856 {
26857  return __arm_vhcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
26858 }
26859 
26860 __extension__ extern __inline int32x4_t
26861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26862 __arm_vhcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26863 {
26864  return __arm_vhcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
26865 }
26866 
26867 __extension__ extern __inline int16x8_t
26868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26869 __arm_vhcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26870 {
26871  return __arm_vhcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
26872 }
26873 
26874 __extension__ extern __inline int8x16_t
26875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26876 __arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
26877 {
26878  return __arm_vhsubq_m_n_s8 (__inactive, __a, __b, __p);
26879 }
26880 
26881 __extension__ extern __inline int32x4_t
26882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26883 __arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
26884 {
26885  return __arm_vhsubq_m_n_s32 (__inactive, __a, __b, __p);
26886 }
26887 
26888 __extension__ extern __inline int16x8_t
26889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26890 __arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
26891 {
26892  return __arm_vhsubq_m_n_s16 (__inactive, __a, __b, __p);
26893 }
26894 
26895 __extension__ extern __inline uint8x16_t
26896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26897 __arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
26898 {
26899  return __arm_vhsubq_m_n_u8 (__inactive, __a, __b, __p);
26900 }
26901 
26902 __extension__ extern __inline uint32x4_t
26903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26904 __arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
26905 {
26906  return __arm_vhsubq_m_n_u32 (__inactive, __a, __b, __p);
26907 }
26908 
26909 __extension__ extern __inline uint16x8_t
26910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26911 __arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
26912 {
26913  return __arm_vhsubq_m_n_u16 (__inactive, __a, __b, __p);
26914 }
26915 
26916 __extension__ extern __inline int8x16_t
26917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26918 __arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26919 {
26920  return __arm_vhsubq_m_s8 (__inactive, __a, __b, __p);
26921 }
26922 
26923 __extension__ extern __inline int32x4_t
26924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26925 __arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26926 {
26927  return __arm_vhsubq_m_s32 (__inactive, __a, __b, __p);
26928 }
26929 
26930 __extension__ extern __inline int16x8_t
26931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26932 __arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26933 {
26934  return __arm_vhsubq_m_s16 (__inactive, __a, __b, __p);
26935 }
26936 
26937 __extension__ extern __inline uint8x16_t
26938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26939 __arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26940 {
26941  return __arm_vhsubq_m_u8 (__inactive, __a, __b, __p);
26942 }
26943 
26944 __extension__ extern __inline uint32x4_t
26945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26946 __arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26947 {
26948  return __arm_vhsubq_m_u32 (__inactive, __a, __b, __p);
26949 }
26950 
26951 __extension__ extern __inline uint16x8_t
26952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26953 __arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26954 {
26955  return __arm_vhsubq_m_u16 (__inactive, __a, __b, __p);
26956 }
26957 
26958 __extension__ extern __inline int8x16_t
26959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26960 __arm_vmaxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26961 {
26962  return __arm_vmaxq_m_s8 (__inactive, __a, __b, __p);
26963 }
26964 
26965 __extension__ extern __inline int32x4_t
26966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26967 __arm_vmaxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26968 {
26969  return __arm_vmaxq_m_s32 (__inactive, __a, __b, __p);
26970 }
26971 
26972 __extension__ extern __inline int16x8_t
26973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26974 __arm_vmaxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26975 {
26976  return __arm_vmaxq_m_s16 (__inactive, __a, __b, __p);
26977 }
26978 
26979 __extension__ extern __inline uint8x16_t
26980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26981 __arm_vmaxq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26982 {
26983  return __arm_vmaxq_m_u8 (__inactive, __a, __b, __p);
26984 }
26985 
26986 __extension__ extern __inline uint32x4_t
26987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26988 __arm_vmaxq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26989 {
26990  return __arm_vmaxq_m_u32 (__inactive, __a, __b, __p);
26991 }
26992 
26993 __extension__ extern __inline uint16x8_t
26994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26995 __arm_vmaxq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26996 {
26997  return __arm_vmaxq_m_u16 (__inactive, __a, __b, __p);
26998 }
26999 
27000 __extension__ extern __inline int8x16_t
27001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27002 __arm_vminq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27003 {
27004  return __arm_vminq_m_s8 (__inactive, __a, __b, __p);
27005 }
27006 
27007 __extension__ extern __inline int32x4_t
27008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27009 __arm_vminq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27010 {
27011  return __arm_vminq_m_s32 (__inactive, __a, __b, __p);
27012 }
27013 
27014 __extension__ extern __inline int16x8_t
27015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27016 __arm_vminq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27017 {
27018  return __arm_vminq_m_s16 (__inactive, __a, __b, __p);
27019 }
27020 
27021 __extension__ extern __inline uint8x16_t
27022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27023 __arm_vminq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27024 {
27025  return __arm_vminq_m_u8 (__inactive, __a, __b, __p);
27026 }
27027 
27028 __extension__ extern __inline uint32x4_t
27029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27030 __arm_vminq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27031 {
27032  return __arm_vminq_m_u32 (__inactive, __a, __b, __p);
27033 }
27034 
27035 __extension__ extern __inline uint16x8_t
27036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27037 __arm_vminq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27038 {
27039  return __arm_vminq_m_u16 (__inactive, __a, __b, __p);
27040 }
27041 
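/* vmladavaq_p: predicated multiply-accumulate across the vector.  Matching
   lanes of __b and __c are multiplied, the products of the active lanes are
   summed, and the sum is added to the scalar accumulator __a.  The "x"
   (vmladavaxq_p) forms use the exchanged-operand variant of the underlying
   instruction, and vmlsdavaq_p / vmlsdavaxq_p below are the corresponding
   multiply-subtract forms.  */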
27042 __extension__ extern __inline int32_t
27043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27044 __arm_vmladavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
27045 {
27046  return __arm_vmladavaq_p_s8 (__a, __b, __c, __p);
27047 }
27048 
27049 __extension__ extern __inline int32_t
27050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27051 __arm_vmladavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
27052 {
27053  return __arm_vmladavaq_p_s32 (__a, __b, __c, __p);
27054 }
27055 
27056 __extension__ extern __inline int32_t
27057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27058 __arm_vmladavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
27059 {
27060  return __arm_vmladavaq_p_s16 (__a, __b, __c, __p);
27061 }
27062 
27063 __extension__ extern __inline uint32_t
27064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27065 __arm_vmladavaq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
27066 {
27067  return __arm_vmladavaq_p_u8 (__a, __b, __c, __p);
27068 }
27069 
27070 __extension__ extern __inline uint32_t
27071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27072 __arm_vmladavaq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
27073 {
27074  return __arm_vmladavaq_p_u32 (__a, __b, __c, __p);
27075 }
27076 
27077 __extension__ extern __inline uint32_t
27078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27079 __arm_vmladavaq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
27080 {
27081  return __arm_vmladavaq_p_u16 (__a, __b, __c, __p);
27082 }
27083 
27084 __extension__ extern __inline int32_t
27085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27086 __arm_vmladavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
27087 {
27088  return __arm_vmladavaxq_p_s8 (__a, __b, __c, __p);
27089 }
27090 
27091 __extension__ extern __inline int32_t
27092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27093 __arm_vmladavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
27094 {
27095  return __arm_vmladavaxq_p_s32 (__a, __b, __c, __p);
27096 }
27097 
27098 __extension__ extern __inline int32_t
27099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27100 __arm_vmladavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
27101 {
27102  return __arm_vmladavaxq_p_s16 (__a, __b, __c, __p);
27103 }
27104 
27105 __extension__ extern __inline int8x16_t
27106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27107 __arm_vmlaq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27108 {
27109  return __arm_vmlaq_m_n_s8 (__a, __b, __c, __p);
27110 }
27111 
27112 __extension__ extern __inline int32x4_t
27113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27114 __arm_vmlaq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27115 {
27116  return __arm_vmlaq_m_n_s32 (__a, __b, __c, __p);
27117 }
27118 
27119 __extension__ extern __inline int16x8_t
27120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27121 __arm_vmlaq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27122 {
27123  return __arm_vmlaq_m_n_s16 (__a, __b, __c, __p);
27124 }
27125 
27126 __extension__ extern __inline uint8x16_t
27127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27128 __arm_vmlaq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
27129 {
27130  return __arm_vmlaq_m_n_u8 (__a, __b, __c, __p);
27131 }
27132 
27133 __extension__ extern __inline uint32x4_t
27134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27135 __arm_vmlaq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
27136 {
27137  return __arm_vmlaq_m_n_u32 (__a, __b, __c, __p);
27138 }
27139 
27140 __extension__ extern __inline uint16x8_t
27141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27142 __arm_vmlaq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
27143 {
27144  return __arm_vmlaq_m_n_u16 (__a, __b, __c, __p);
27145 }
27146 
27147 __extension__ extern __inline int8x16_t
27148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27149 __arm_vmlasq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27150 {
27151  return __arm_vmlasq_m_n_s8 (__a, __b, __c, __p);
27152 }
27153 
27154 __extension__ extern __inline int32x4_t
27155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27156 __arm_vmlasq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27157 {
27158  return __arm_vmlasq_m_n_s32 (__a, __b, __c, __p);
27159 }
27160 
27161 __extension__ extern __inline int16x8_t
27162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27163 __arm_vmlasq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27164 {
27165  return __arm_vmlasq_m_n_s16 (__a, __b, __c, __p);
27166 }
27167 
27168 __extension__ extern __inline uint8x16_t
27169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27170 __arm_vmlasq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
27171 {
27172  return __arm_vmlasq_m_n_u8 (__a, __b, __c, __p);
27173 }
27174 
27175 __extension__ extern __inline uint32x4_t
27176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27177 __arm_vmlasq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
27178 {
27179  return __arm_vmlasq_m_n_u32 (__a, __b, __c, __p);
27180 }
27181 
27182 __extension__ extern __inline uint16x8_t
27183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27184 __arm_vmlasq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
27185 {
27186  return __arm_vmlasq_m_n_u16 (__a, __b, __c, __p);
27187 }
27188 
27189 __extension__ extern __inline int32_t
27190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27191 __arm_vmlsdavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
27192 {
27193  return __arm_vmlsdavaq_p_s8 (__a, __b, __c, __p);
27194 }
27195 
27196 __extension__ extern __inline int32_t
27197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27198 __arm_vmlsdavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
27199 {
27200  return __arm_vmlsdavaq_p_s32 (__a, __b, __c, __p);
27201 }
27202 
27203 __extension__ extern __inline int32_t
27204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27205 __arm_vmlsdavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
27206 {
27207  return __arm_vmlsdavaq_p_s16 (__a, __b, __c, __p);
27208 }
27209 
27210 __extension__ extern __inline int32_t
27211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27212 __arm_vmlsdavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
27213 {
27214  return __arm_vmlsdavaxq_p_s8 (__a, __b, __c, __p);
27215 }
27216 
27217 __extension__ extern __inline int32_t
27218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27219 __arm_vmlsdavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
27220 {
27221  return __arm_vmlsdavaxq_p_s32 (__a, __b, __c, __p);
27222 }
27223 
27224 __extension__ extern __inline int32_t
27225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27226 __arm_vmlsdavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
27227 {
27228  return __arm_vmlsdavaxq_p_s16 (__a, __b, __c, __p);
27229 }
27230 
27231 __extension__ extern __inline int8x16_t
27232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27233 __arm_vmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27234 {
27235  return __arm_vmulhq_m_s8 (__inactive, __a, __b, __p);
27236 }
27237 
27238 __extension__ extern __inline int32x4_t
27239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27240 __arm_vmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27241 {
27242  return __arm_vmulhq_m_s32 (__inactive, __a, __b, __p);
27243 }
27244 
27245 __extension__ extern __inline int16x8_t
27246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27247 __arm_vmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27248 {
27249  return __arm_vmulhq_m_s16 (__inactive, __a, __b, __p);
27250 }
27251 
27252 __extension__ extern __inline uint8x16_t
27253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27254 __arm_vmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27255 {
27256  return __arm_vmulhq_m_u8 (__inactive, __a, __b, __p);
27257 }
27258 
27259 __extension__ extern __inline uint32x4_t
27260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27261 __arm_vmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27262 {
27263  return __arm_vmulhq_m_u32 (__inactive, __a, __b, __p);
27264 }
27265 
27266 __extension__ extern __inline uint16x8_t
27267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27268 __arm_vmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27269 {
27270  return __arm_vmulhq_m_u16 (__inactive, __a, __b, __p);
27271 }
27272 
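/* vmullbq_int_m / vmulltq_int_m: predicated widening multiplies.  The "b"
   form multiplies the bottom (even-numbered) lanes and the "t" form the top
   (odd-numbered) lanes, producing double-width elements, hence the wider
   types of __inactive and the result.  */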
27273 __extension__ extern __inline int16x8_t
27274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27275 __arm_vmullbq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27276 {
27277  return __arm_vmullbq_int_m_s8 (__inactive, __a, __b, __p);
27278 }
27279 
27280 __extension__ extern __inline int64x2_t
27281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27282 __arm_vmullbq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27283 {
27284  return __arm_vmullbq_int_m_s32 (__inactive, __a, __b, __p);
27285 }
27286 
27287 __extension__ extern __inline int32x4_t
27288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27289 __arm_vmullbq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27290 {
27291  return __arm_vmullbq_int_m_s16 (__inactive, __a, __b, __p);
27292 }
27293 
27294 __extension__ extern __inline uint16x8_t
27295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27296 __arm_vmullbq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27297 {
27298  return __arm_vmullbq_int_m_u8 (__inactive, __a, __b, __p);
27299 }
27300 
27301 __extension__ extern __inline uint64x2_t
27302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27303 __arm_vmullbq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27304 {
27305  return __arm_vmullbq_int_m_u32 (__inactive, __a, __b, __p);
27306 }
27307 
27308 __extension__ extern __inline uint32x4_t
27309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27310 __arm_vmullbq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27311 {
27312  return __arm_vmullbq_int_m_u16 (__inactive, __a, __b, __p);
27313 }
27314 
27315 __extension__ extern __inline int16x8_t
27316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27317 __arm_vmulltq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27318 {
27319  return __arm_vmulltq_int_m_s8 (__inactive, __a, __b, __p);
27320 }
27321 
27322 __extension__ extern __inline int64x2_t
27323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27324 __arm_vmulltq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27325 {
27326  return __arm_vmulltq_int_m_s32 (__inactive, __a, __b, __p);
27327 }
27328 
27329 __extension__ extern __inline int32x4_t
27330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27331 __arm_vmulltq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27332 {
27333  return __arm_vmulltq_int_m_s16 (__inactive, __a, __b, __p);
27334 }
27335 
27336 __extension__ extern __inline uint16x8_t
27337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27338 __arm_vmulltq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27339 {
27340  return __arm_vmulltq_int_m_u8 (__inactive, __a, __b, __p);
27341 }
27342 
27343 __extension__ extern __inline uint64x2_t
27344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27345 __arm_vmulltq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27346 {
27347  return __arm_vmulltq_int_m_u32 (__inactive, __a, __b, __p);
27348 }
27349 
27350 __extension__ extern __inline uint32x4_t
27351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27352 __arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27353 {
27354  return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p);
27355 }
27356 
27357 __extension__ extern __inline int8x16_t
27358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27359 __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27360 {
27361  return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p);
27362 }
27363 
27364 __extension__ extern __inline int32x4_t
27365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27366 __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27367 {
27368  return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p);
27369 }
27370 
27371 __extension__ extern __inline int16x8_t
27372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27373 __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27374 {
27375  return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p);
27376 }
27377 
27378 __extension__ extern __inline uint8x16_t
27379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27380 __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
27381 {
27382  return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p);
27383 }
27384 
27385 __extension__ extern __inline uint32x4_t
27386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27387 __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
27388 {
27389  return __arm_vmulq_m_n_u32 (__inactive, __a, __b, __p);
27390 }
27391 
27392 __extension__ extern __inline uint16x8_t
27393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27394 __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
27395 {
27396  return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p);
27397 }
27398 
27399 __extension__ extern __inline int8x16_t
27400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27401 __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27402 {
27403  return __arm_vmulq_m_s8 (__inactive, __a, __b, __p);
27404 }
27405 
27406 __extension__ extern __inline int32x4_t
27407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27408 __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27409 {
27410  return __arm_vmulq_m_s32 (__inactive, __a, __b, __p);
27411 }
27412 
27413 __extension__ extern __inline int16x8_t
27414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27415 __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27416 {
27417  return __arm_vmulq_m_s16 (__inactive, __a, __b, __p);
27418 }
27419 
27420 __extension__ extern __inline uint8x16_t
27421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27422 __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27423 {
27424  return __arm_vmulq_m_u8 (__inactive, __a, __b, __p);
27425 }
27426 
27427 __extension__ extern __inline uint32x4_t
27428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27429 __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27430 {
27431  return __arm_vmulq_m_u32 (__inactive, __a, __b, __p);
27432 }
27433 
27434 __extension__ extern __inline uint16x8_t
27435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27436 __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27437 {
27438  return __arm_vmulq_m_u16 (__inactive, __a, __b, __p);
27439 }
27440 
27441 __extension__ extern __inline int8x16_t
27442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27443 __arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27444 {
27445  return __arm_vornq_m_s8 (__inactive, __a, __b, __p);
27446 }
27447 
27448 __extension__ extern __inline int32x4_t
27449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27450 __arm_vornq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27451 {
27452  return __arm_vornq_m_s32 (__inactive, __a, __b, __p);
27453 }
27454 
27455 __extension__ extern __inline int16x8_t
27456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27457 __arm_vornq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27458 {
27459  return __arm_vornq_m_s16 (__inactive, __a, __b, __p);
27460 }
27461 
27462 __extension__ extern __inline uint8x16_t
27463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27464 __arm_vornq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27465 {
27466  return __arm_vornq_m_u8 (__inactive, __a, __b, __p);
27467 }
27468 
27469 __extension__ extern __inline uint32x4_t
27470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27471 __arm_vornq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27472 {
27473  return __arm_vornq_m_u32 (__inactive, __a, __b, __p);
27474 }
27475 
27476 __extension__ extern __inline uint16x8_t
27477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27478 __arm_vornq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27479 {
27480  return __arm_vornq_m_u16 (__inactive, __a, __b, __p);
27481 }
27482 
27483 __extension__ extern __inline int8x16_t
27484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27485 __arm_vorrq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27486 {
27487  return __arm_vorrq_m_s8 (__inactive, __a, __b, __p);
27488 }
27489 
27490 __extension__ extern __inline int32x4_t
27491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27492 __arm_vorrq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27493 {
27494  return __arm_vorrq_m_s32 (__inactive, __a, __b, __p);
27495 }
27496 
27497 __extension__ extern __inline int16x8_t
27498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27499 __arm_vorrq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27500 {
27501  return __arm_vorrq_m_s16 (__inactive, __a, __b, __p);
27502 }
27503 
27504 __extension__ extern __inline uint8x16_t
27505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27506 __arm_vorrq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27507 {
27508  return __arm_vorrq_m_u8 (__inactive, __a, __b, __p);
27509 }
27510 
27511 __extension__ extern __inline uint32x4_t
27512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27513 __arm_vorrq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27514 {
27515  return __arm_vorrq_m_u32 (__inactive, __a, __b, __p);
27516 }
27517 
27518 __extension__ extern __inline uint16x8_t
27519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27520 __arm_vorrq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27521 {
27522  return __arm_vorrq_m_u16 (__inactive, __a, __b, __p);
27523 }
27524 
27525 __extension__ extern __inline int8x16_t
27526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int8x16_t __inactive,int8x16_t __a,int8_t __b,mve_pred16_t __p)27527 __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27528 {
27529  return __arm_vqaddq_m_n_s8 (__inactive, __a, __b, __p);
27530 }
27531 
27532 __extension__ extern __inline int32x4_t
27533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int32x4_t __inactive,int32x4_t __a,int32_t __b,mve_pred16_t __p)27534 __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27535 {
27536  return __arm_vqaddq_m_n_s32 (__inactive, __a, __b, __p);
27537 }
27538 
27539 __extension__ extern __inline int16x8_t
27540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int16x8_t __inactive,int16x8_t __a,int16_t __b,mve_pred16_t __p)27541 __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27542 {
27543  return __arm_vqaddq_m_n_s16 (__inactive, __a, __b, __p);
27544 }
27545 
27546 __extension__ extern __inline uint8x16_t
27547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint8x16_t __inactive,uint8x16_t __a,uint8_t __b,mve_pred16_t __p)27548 __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
27549 {
27550  return __arm_vqaddq_m_n_u8 (__inactive, __a, __b, __p);
27551 }
27552 
27553 __extension__ extern __inline uint32x4_t
27554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint32x4_t __inactive,uint32x4_t __a,uint32_t __b,mve_pred16_t __p)27555 __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
27556 {
27557  return __arm_vqaddq_m_n_u32 (__inactive, __a, __b, __p);
27558 }
27559 
27560 __extension__ extern __inline uint16x8_t
27561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint16x8_t __inactive,uint16x8_t __a,uint16_t __b,mve_pred16_t __p)27562 __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
27563 {
27564  return __arm_vqaddq_m_n_u16 (__inactive, __a, __b, __p);
27565 }
27566 
27567 __extension__ extern __inline int8x16_t
27568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27569 __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27570 {
27571  return __arm_vqaddq_m_s8 (__inactive, __a, __b, __p);
27572 }
27573 
27574 __extension__ extern __inline int32x4_t
27575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27576 __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27577 {
27578  return __arm_vqaddq_m_s32 (__inactive, __a, __b, __p);
27579 }
27580 
27581 __extension__ extern __inline int16x8_t
27582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27583 __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27584 {
27585  return __arm_vqaddq_m_s16 (__inactive, __a, __b, __p);
27586 }
27587 
27588 __extension__ extern __inline uint8x16_t
27589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint8x16_t __inactive,uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)27590 __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27591 {
27592  return __arm_vqaddq_m_u8 (__inactive, __a, __b, __p);
27593 }
27594 
27595 __extension__ extern __inline uint32x4_t
27596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint32x4_t __inactive,uint32x4_t __a,uint32x4_t __b,mve_pred16_t __p)27597 __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27598 {
27599  return __arm_vqaddq_m_u32 (__inactive, __a, __b, __p);
27600 }
27601 
27602 __extension__ extern __inline uint16x8_t
27603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m(uint16x8_t __inactive,uint16x8_t __a,uint16x8_t __b,mve_pred16_t __p)27604 __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27605 {
27606  return __arm_vqaddq_m_u16 (__inactive, __a, __b, __p);
27607 }
27608 
27609 __extension__ extern __inline int8x16_t
27610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27611 __arm_vqdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27612 {
27613  return __arm_vqdmladhq_m_s8 (__inactive, __a, __b, __p);
27614 }
27615 
27616 __extension__ extern __inline int32x4_t
27617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27618 __arm_vqdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27619 {
27620  return __arm_vqdmladhq_m_s32 (__inactive, __a, __b, __p);
27621 }
27622 
27623 __extension__ extern __inline int16x8_t
27624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27625 __arm_vqdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27626 {
27627  return __arm_vqdmladhq_m_s16 (__inactive, __a, __b, __p);
27628 }
27629 
27630 __extension__ extern __inline int8x16_t
27631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27632 __arm_vqdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27633 {
27634  return __arm_vqdmladhxq_m_s8 (__inactive, __a, __b, __p);
27635 }
27636 
27637 __extension__ extern __inline int32x4_t
27638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27639 __arm_vqdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27640 {
27641  return __arm_vqdmladhxq_m_s32 (__inactive, __a, __b, __p);
27642 }
27643 
27644 __extension__ extern __inline int16x8_t
27645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27646 __arm_vqdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27647 {
27648  return __arm_vqdmladhxq_m_s16 (__inactive, __a, __b, __p);
27649 }
27650 
27651 __extension__ extern __inline int8x16_t
27652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m(int8x16_t __a,int8x16_t __b,int8_t __c,mve_pred16_t __p)27653 __arm_vqdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27654 {
27655  return __arm_vqdmlahq_m_n_s8 (__a, __b, __c, __p);
27656 }
27657 
27658 __extension__ extern __inline int32x4_t
27659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m(int32x4_t __a,int32x4_t __b,int32_t __c,mve_pred16_t __p)27660 __arm_vqdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27661 {
27662  return __arm_vqdmlahq_m_n_s32 (__a, __b, __c, __p);
27663 }
27664 
27665 __extension__ extern __inline int16x8_t
27666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m(int16x8_t __a,int16x8_t __b,int16_t __c,mve_pred16_t __p)27667 __arm_vqdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27668 {
27669  return __arm_vqdmlahq_m_n_s16 (__a, __b, __c, __p);
27670 }
27671 
27672 __extension__ extern __inline int8x16_t
27673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27674 __arm_vqdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27675 {
27676  return __arm_vqdmlsdhq_m_s8 (__inactive, __a, __b, __p);
27677 }
27678 
27679 __extension__ extern __inline int32x4_t
27680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27681 __arm_vqdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27682 {
27683  return __arm_vqdmlsdhq_m_s32 (__inactive, __a, __b, __p);
27684 }
27685 
27686 __extension__ extern __inline int16x8_t
27687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27688 __arm_vqdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27689 {
27690  return __arm_vqdmlsdhq_m_s16 (__inactive, __a, __b, __p);
27691 }
27692 
27693 __extension__ extern __inline int8x16_t
27694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27695 __arm_vqdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27696 {
27697  return __arm_vqdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
27698 }
27699 
27700 __extension__ extern __inline int32x4_t
27701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27702 __arm_vqdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27703 {
27704  return __arm_vqdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
27705 }
27706 
27707 __extension__ extern __inline int16x8_t
27708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27709 __arm_vqdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27710 {
27711  return __arm_vqdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
27712 }
27713 
27714 __extension__ extern __inline int8x16_t
27715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int8x16_t __inactive,int8x16_t __a,int8_t __b,mve_pred16_t __p)27716 __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27717 {
27718  return __arm_vqdmulhq_m_n_s8 (__inactive, __a, __b, __p);
27719 }
27720 
27721 __extension__ extern __inline int32x4_t
27722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int32x4_t __inactive,int32x4_t __a,int32_t __b,mve_pred16_t __p)27723 __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27724 {
27725  return __arm_vqdmulhq_m_n_s32 (__inactive, __a, __b, __p);
27726 }
27727 
27728 __extension__ extern __inline int16x8_t
27729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int16x8_t __inactive,int16x8_t __a,int16_t __b,mve_pred16_t __p)27730 __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27731 {
27732  return __arm_vqdmulhq_m_n_s16 (__inactive, __a, __b, __p);
27733 }
27734 
27735 __extension__ extern __inline int8x16_t
27736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27737 __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27738 {
27739  return __arm_vqdmulhq_m_s8 (__inactive, __a, __b, __p);
27740 }
27741 
27742 __extension__ extern __inline int32x4_t
27743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27744 __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27745 {
27746  return __arm_vqdmulhq_m_s32 (__inactive, __a, __b, __p);
27747 }
27748 
27749 __extension__ extern __inline int16x8_t
27750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27751 __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27752 {
27753  return __arm_vqdmulhq_m_s16 (__inactive, __a, __b, __p);
27754 }
27755 
27756 __extension__ extern __inline int8x16_t
27757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27758 __arm_vqrdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27759 {
27760  return __arm_vqrdmladhq_m_s8 (__inactive, __a, __b, __p);
27761 }
27762 
27763 __extension__ extern __inline int32x4_t
27764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27765 __arm_vqrdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27766 {
27767  return __arm_vqrdmladhq_m_s32 (__inactive, __a, __b, __p);
27768 }
27769 
27770 __extension__ extern __inline int16x8_t
27771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27772 __arm_vqrdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27773 {
27774  return __arm_vqrdmladhq_m_s16 (__inactive, __a, __b, __p);
27775 }
27776 
27777 __extension__ extern __inline int8x16_t
27778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27779 __arm_vqrdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27780 {
27781  return __arm_vqrdmladhxq_m_s8 (__inactive, __a, __b, __p);
27782 }
27783 
27784 __extension__ extern __inline int32x4_t
27785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27786 __arm_vqrdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27787 {
27788  return __arm_vqrdmladhxq_m_s32 (__inactive, __a, __b, __p);
27789 }
27790 
27791 __extension__ extern __inline int16x8_t
27792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27793 __arm_vqrdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27794 {
27795  return __arm_vqrdmladhxq_m_s16 (__inactive, __a, __b, __p);
27796 }
27797 
27798 __extension__ extern __inline int8x16_t
27799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m(int8x16_t __a,int8x16_t __b,int8_t __c,mve_pred16_t __p)27800 __arm_vqrdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27801 {
27802  return __arm_vqrdmlahq_m_n_s8 (__a, __b, __c, __p);
27803 }
27804 
27805 __extension__ extern __inline int32x4_t
27806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m(int32x4_t __a,int32x4_t __b,int32_t __c,mve_pred16_t __p)27807 __arm_vqrdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27808 {
27809  return __arm_vqrdmlahq_m_n_s32 (__a, __b, __c, __p);
27810 }
27811 
27812 __extension__ extern __inline int16x8_t
27813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m(int16x8_t __a,int16x8_t __b,int16_t __c,mve_pred16_t __p)27814 __arm_vqrdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27815 {
27816  return __arm_vqrdmlahq_m_n_s16 (__a, __b, __c, __p);
27817 }
27818 
27819 __extension__ extern __inline int8x16_t
27820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m(int8x16_t __a,int8x16_t __b,int8_t __c,mve_pred16_t __p)27821 __arm_vqrdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27822 {
27823  return __arm_vqrdmlashq_m_n_s8 (__a, __b, __c, __p);
27824 }
27825 
27826 __extension__ extern __inline int32x4_t
27827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m(int32x4_t __a,int32x4_t __b,int32_t __c,mve_pred16_t __p)27828 __arm_vqrdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27829 {
27830  return __arm_vqrdmlashq_m_n_s32 (__a, __b, __c, __p);
27831 }
27832 
27833 __extension__ extern __inline int16x8_t
27834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m(int16x8_t __a,int16x8_t __b,int16_t __c,mve_pred16_t __p)27835 __arm_vqrdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27836 {
27837  return __arm_vqrdmlashq_m_n_s16 (__a, __b, __c, __p);
27838 }
27839 
27840 __extension__ extern __inline int8x16_t
27841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m(int8x16_t __a,int8x16_t __b,int8_t __c,mve_pred16_t __p)27842 __arm_vqdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27843 {
27844  return __arm_vqdmlashq_m_n_s8 (__a, __b, __c, __p);
27845 }
27846 
27847 __extension__ extern __inline int16x8_t
27848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m(int16x8_t __a,int16x8_t __b,int16_t __c,mve_pred16_t __p)27849 __arm_vqdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27850 {
27851  return __arm_vqdmlashq_m_n_s16 (__a, __b, __c, __p);
27852 }
27853 
27854 __extension__ extern __inline int32x4_t
27855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m(int32x4_t __a,int32x4_t __b,int32_t __c,mve_pred16_t __p)27856 __arm_vqdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27857 {
27858  return __arm_vqdmlashq_m_n_s32 (__a, __b, __c, __p);
27859 }
27860 
27861 __extension__ extern __inline int8x16_t
27862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27863 __arm_vqrdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27864 {
27865  return __arm_vqrdmlsdhq_m_s8 (__inactive, __a, __b, __p);
27866 }
27867 
27868 __extension__ extern __inline int32x4_t
27869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27870 __arm_vqrdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27871 {
27872  return __arm_vqrdmlsdhq_m_s32 (__inactive, __a, __b, __p);
27873 }
27874 
27875 __extension__ extern __inline int16x8_t
27876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27877 __arm_vqrdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27878 {
27879  return __arm_vqrdmlsdhq_m_s16 (__inactive, __a, __b, __p);
27880 }
27881 
27882 __extension__ extern __inline int8x16_t
27883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27884 __arm_vqrdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27885 {
27886  return __arm_vqrdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
27887 }
27888 
27889 __extension__ extern __inline int32x4_t
27890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27891 __arm_vqrdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27892 {
27893  return __arm_vqrdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
27894 }
27895 
27896 __extension__ extern __inline int16x8_t
27897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27898 __arm_vqrdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27899 {
27900  return __arm_vqrdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
27901 }
27902 
27903 __extension__ extern __inline int8x16_t
27904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int8x16_t __inactive,int8x16_t __a,int8_t __b,mve_pred16_t __p)27905 __arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27906 {
27907  return __arm_vqrdmulhq_m_n_s8 (__inactive, __a, __b, __p);
27908 }
27909 
27910 __extension__ extern __inline int32x4_t
27911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int32x4_t __inactive,int32x4_t __a,int32_t __b,mve_pred16_t __p)27912 __arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27913 {
27914  return __arm_vqrdmulhq_m_n_s32 (__inactive, __a, __b, __p);
27915 }
27916 
27917 __extension__ extern __inline int16x8_t
27918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int16x8_t __inactive,int16x8_t __a,int16_t __b,mve_pred16_t __p)27919 __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27920 {
27921  return __arm_vqrdmulhq_m_n_s16 (__inactive, __a, __b, __p);
27922 }
27923 
27924 __extension__ extern __inline int8x16_t
27925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27926 __arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27927 {
27928  return __arm_vqrdmulhq_m_s8 (__inactive, __a, __b, __p);
27929 }
27930 
27931 __extension__ extern __inline int32x4_t
27932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27933 __arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27934 {
27935  return __arm_vqrdmulhq_m_s32 (__inactive, __a, __b, __p);
27936 }
27937 
27938 __extension__ extern __inline int16x8_t
27939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27940 __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27941 {
27942  return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
27943 }
27944 
27945 __extension__ extern __inline int8x16_t
27946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)27947 __arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27948 {
27949  return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
27950 }
27951 
27952 __extension__ extern __inline int32x4_t
27953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)27954 __arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27955 {
27956  return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
27957 }
27958 
27959 __extension__ extern __inline int16x8_t
27960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)27961 __arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27962 {
27963  return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
27964 }
27965 
27966 __extension__ extern __inline uint8x16_t
27967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(uint8x16_t __inactive,uint8x16_t __a,int8x16_t __b,mve_pred16_t __p)27968 __arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27969 {
27970  return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
27971 }
27972 
27973 __extension__ extern __inline uint32x4_t
27974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(uint32x4_t __inactive,uint32x4_t __a,int32x4_t __b,mve_pred16_t __p)27975 __arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27976 {
27977  return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
27978 }
27979 
27980 __extension__ extern __inline uint16x8_t
27981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m(uint16x8_t __inactive,uint16x8_t __a,int16x8_t __b,mve_pred16_t __p)27982 __arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27983 {
27984  return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
27985 }
27986 
27987 __extension__ extern __inline int8x16_t
27988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(int8x16_t __inactive,int8x16_t __a,const int __imm,mve_pred16_t __p)27989 __arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
27990 {
27991  return __arm_vqshlq_m_n_s8 (__inactive, __a, __imm, __p);
27992 }
27993 
27994 __extension__ extern __inline int32x4_t
27995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(int32x4_t __inactive,int32x4_t __a,const int __imm,mve_pred16_t __p)27996 __arm_vqshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
27997 {
27998  return __arm_vqshlq_m_n_s32 (__inactive, __a, __imm, __p);
27999 }
28000 
28001 __extension__ extern __inline int16x8_t
28002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(int16x8_t __inactive,int16x8_t __a,const int __imm,mve_pred16_t __p)28003 __arm_vqshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28004 {
28005  return __arm_vqshlq_m_n_s16 (__inactive, __a, __imm, __p);
28006 }
28007 
28008 __extension__ extern __inline uint8x16_t
28009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(uint8x16_t __inactive,uint8x16_t __a,const int __imm,mve_pred16_t __p)28010 __arm_vqshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28011 {
28012  return __arm_vqshlq_m_n_u8 (__inactive, __a, __imm, __p);
28013 }
28014 
28015 __extension__ extern __inline uint32x4_t
28016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(uint32x4_t __inactive,uint32x4_t __a,const int __imm,mve_pred16_t __p)28017 __arm_vqshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
28018 {
28019  return __arm_vqshlq_m_n_u32 (__inactive, __a, __imm, __p);
28020 }
28021 
28022 __extension__ extern __inline uint16x8_t
28023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n(uint16x8_t __inactive,uint16x8_t __a,const int __imm,mve_pred16_t __p)28024 __arm_vqshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28025 {
28026  return __arm_vqshlq_m_n_u16 (__inactive, __a, __imm, __p);
28027 }
28028 
28029 __extension__ extern __inline int8x16_t
28030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)28031 __arm_vqshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28032 {
28033  return __arm_vqshlq_m_s8 (__inactive, __a, __b, __p);
28034 }
28035 
28036 __extension__ extern __inline int32x4_t
28037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)28038 __arm_vqshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28039 {
28040  return __arm_vqshlq_m_s32 (__inactive, __a, __b, __p);
28041 }
28042 
28043 __extension__ extern __inline int16x8_t
28044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)28045 __arm_vqshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28046 {
28047  return __arm_vqshlq_m_s16 (__inactive, __a, __b, __p);
28048 }
28049 
28050 __extension__ extern __inline uint8x16_t
28051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(uint8x16_t __inactive,uint8x16_t __a,int8x16_t __b,mve_pred16_t __p)28052 __arm_vqshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28053 {
28054  return __arm_vqshlq_m_u8 (__inactive, __a, __b, __p);
28055 }
28056 
28057 __extension__ extern __inline uint32x4_t
28058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(uint32x4_t __inactive,uint32x4_t __a,int32x4_t __b,mve_pred16_t __p)28059 __arm_vqshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28060 {
28061  return __arm_vqshlq_m_u32 (__inactive, __a, __b, __p);
28062 }
28063 
28064 __extension__ extern __inline uint16x8_t
28065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m(uint16x8_t __inactive,uint16x8_t __a,int16x8_t __b,mve_pred16_t __p)28066 __arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28067 {
28068  return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
28069 }
28070 
28071 __extension__ extern __inline int8x16_t
28072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int8x16_t __inactive,int8x16_t __a,int8_t __b,mve_pred16_t __p)28073 __arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
28074 {
28075  return __arm_vqsubq_m_n_s8 (__inactive, __a, __b, __p);
28076 }
28077 
28078 __extension__ extern __inline int32x4_t
28079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int32x4_t __inactive,int32x4_t __a,int32_t __b,mve_pred16_t __p)28080 __arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
28081 {
28082  return __arm_vqsubq_m_n_s32 (__inactive, __a, __b, __p);
28083 }
28084 
28085 __extension__ extern __inline int16x8_t
28086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int16x8_t __inactive,int16x8_t __a,int16_t __b,mve_pred16_t __p)28087 __arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
28088 {
28089  return __arm_vqsubq_m_n_s16 (__inactive, __a, __b, __p);
28090 }
28091 
28092 __extension__ extern __inline uint8x16_t
28093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint8x16_t __inactive,uint8x16_t __a,uint8_t __b,mve_pred16_t __p)28094 __arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
28095 {
28096  return __arm_vqsubq_m_n_u8 (__inactive, __a, __b, __p);
28097 }
28098 
28099 __extension__ extern __inline uint32x4_t
28100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint32x4_t __inactive,uint32x4_t __a,uint32_t __b,mve_pred16_t __p)28101 __arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
28102 {
28103  return __arm_vqsubq_m_n_u32 (__inactive, __a, __b, __p);
28104 }
28105 
28106 __extension__ extern __inline uint16x8_t
28107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint16x8_t __inactive,uint16x8_t __a,uint16_t __b,mve_pred16_t __p)28108 __arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
28109 {
28110  return __arm_vqsubq_m_n_u16 (__inactive, __a, __b, __p);
28111 }
28112 
28113 __extension__ extern __inline int8x16_t
28114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)28115 __arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28116 {
28117  return __arm_vqsubq_m_s8 (__inactive, __a, __b, __p);
28118 }
28119 
28120 __extension__ extern __inline int32x4_t
28121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)28122 __arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28123 {
28124  return __arm_vqsubq_m_s32 (__inactive, __a, __b, __p);
28125 }
28126 
28127 __extension__ extern __inline int16x8_t
28128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)28129 __arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28130 {
28131  return __arm_vqsubq_m_s16 (__inactive, __a, __b, __p);
28132 }
28133 
28134 __extension__ extern __inline uint8x16_t
28135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint8x16_t __inactive,uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)28136 __arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
28137 {
28138  return __arm_vqsubq_m_u8 (__inactive, __a, __b, __p);
28139 }
28140 
28141 __extension__ extern __inline uint32x4_t
28142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint32x4_t __inactive,uint32x4_t __a,uint32x4_t __b,mve_pred16_t __p)28143 __arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
28144 {
28145  return __arm_vqsubq_m_u32 (__inactive, __a, __b, __p);
28146 }
28147 
28148 __extension__ extern __inline uint16x8_t
28149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m(uint16x8_t __inactive,uint16x8_t __a,uint16x8_t __b,mve_pred16_t __p)28150 __arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
28151 {
28152  return __arm_vqsubq_m_u16 (__inactive, __a, __b, __p);
28153 }
28154 
28155 __extension__ extern __inline int8x16_t
28156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)28157 __arm_vrhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28158 {
28159  return __arm_vrhaddq_m_s8 (__inactive, __a, __b, __p);
28160 }
28161 
28162 __extension__ extern __inline int32x4_t
28163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)28164 __arm_vrhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28165 {
28166  return __arm_vrhaddq_m_s32 (__inactive, __a, __b, __p);
28167 }
28168 
28169 __extension__ extern __inline int16x8_t
28170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)28171 __arm_vrhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28172 {
28173  return __arm_vrhaddq_m_s16 (__inactive, __a, __b, __p);
28174 }
28175 
28176 __extension__ extern __inline uint8x16_t
28177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(uint8x16_t __inactive,uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)28178 __arm_vrhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
28179 {
28180  return __arm_vrhaddq_m_u8 (__inactive, __a, __b, __p);
28181 }
28182 
28183 __extension__ extern __inline uint32x4_t
28184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(uint32x4_t __inactive,uint32x4_t __a,uint32x4_t __b,mve_pred16_t __p)28185 __arm_vrhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
28186 {
28187  return __arm_vrhaddq_m_u32 (__inactive, __a, __b, __p);
28188 }
28189 
28190 __extension__ extern __inline uint16x8_t
28191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m(uint16x8_t __inactive,uint16x8_t __a,uint16x8_t __b,mve_pred16_t __p)28192 __arm_vrhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
28193 {
28194  return __arm_vrhaddq_m_u16 (__inactive, __a, __b, __p);
28195 }
28196 
28197 __extension__ extern __inline int8x16_t
28198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)28199 __arm_vrmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28200 {
28201  return __arm_vrmulhq_m_s8 (__inactive, __a, __b, __p);
28202 }
28203 
28204 __extension__ extern __inline int32x4_t
28205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)28206 __arm_vrmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28207 {
28208  return __arm_vrmulhq_m_s32 (__inactive, __a, __b, __p);
28209 }
28210 
28211 __extension__ extern __inline int16x8_t
28212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)28213 __arm_vrmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28214 {
28215  return __arm_vrmulhq_m_s16 (__inactive, __a, __b, __p);
28216 }
28217 
28218 __extension__ extern __inline uint8x16_t
28219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(uint8x16_t __inactive,uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)28220 __arm_vrmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
28221 {
28222  return __arm_vrmulhq_m_u8 (__inactive, __a, __b, __p);
28223 }
28224 
28225 __extension__ extern __inline uint32x4_t
28226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(uint32x4_t __inactive,uint32x4_t __a,uint32x4_t __b,mve_pred16_t __p)28227 __arm_vrmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
28228 {
28229  return __arm_vrmulhq_m_u32 (__inactive, __a, __b, __p);
28230 }
28231 
28232 __extension__ extern __inline uint16x8_t
28233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m(uint16x8_t __inactive,uint16x8_t __a,uint16x8_t __b,mve_pred16_t __p)28234 __arm_vrmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
28235 {
28236  return __arm_vrmulhq_m_u16 (__inactive, __a, __b, __p);
28237 }
28238 
28239 __extension__ extern __inline int8x16_t
28240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(int8x16_t __inactive,int8x16_t __a,int8x16_t __b,mve_pred16_t __p)28241 __arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28242 {
28243  return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
28244 }
28245 
28246 __extension__ extern __inline int32x4_t
28247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(int32x4_t __inactive,int32x4_t __a,int32x4_t __b,mve_pred16_t __p)28248 __arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28249 {
28250  return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
28251 }
28252 
28253 __extension__ extern __inline int16x8_t
28254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(int16x8_t __inactive,int16x8_t __a,int16x8_t __b,mve_pred16_t __p)28255 __arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28256 {
28257  return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
28258 }
28259 
28260 __extension__ extern __inline uint8x16_t
28261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(uint8x16_t __inactive,uint8x16_t __a,int8x16_t __b,mve_pred16_t __p)28262 __arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
28263 {
28264  return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
28265 }
28266 
28267 __extension__ extern __inline uint32x4_t
28268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(uint32x4_t __inactive,uint32x4_t __a,int32x4_t __b,mve_pred16_t __p)28269 __arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28270 {
28271  return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
28272 }
28273 
28274 __extension__ extern __inline uint16x8_t
28275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m(uint16x8_t __inactive,uint16x8_t __a,int16x8_t __b,mve_pred16_t __p)28276 __arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28277 {
28278  return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
28279 }
28280 
28281 __extension__ extern __inline int8x16_t
28282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(int8x16_t __inactive,int8x16_t __a,const int __imm,mve_pred16_t __p)28283 __arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28284 {
28285  return __arm_vrshrq_m_n_s8 (__inactive, __a, __imm, __p);
28286 }
28287 
28288 __extension__ extern __inline int32x4_t
28289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(int32x4_t __inactive,int32x4_t __a,const int __imm,mve_pred16_t __p)28290 __arm_vrshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
28291 {
28292  return __arm_vrshrq_m_n_s32 (__inactive, __a, __imm, __p);
28293 }
28294 
28295 __extension__ extern __inline int16x8_t
28296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(int16x8_t __inactive,int16x8_t __a,const int __imm,mve_pred16_t __p)28297 __arm_vrshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28298 {
28299  return __arm_vrshrq_m_n_s16 (__inactive, __a, __imm, __p);
28300 }
28301 
28302 __extension__ extern __inline uint8x16_t
28303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(uint8x16_t __inactive,uint8x16_t __a,const int __imm,mve_pred16_t __p)28304 __arm_vrshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28305 {
28306  return __arm_vrshrq_m_n_u8 (__inactive, __a, __imm, __p);
28307 }
28308 
28309 __extension__ extern __inline uint32x4_t
28310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(uint32x4_t __inactive,uint32x4_t __a,const int __imm,mve_pred16_t __p)28311 __arm_vrshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
28312 {
28313  return __arm_vrshrq_m_n_u32 (__inactive, __a, __imm, __p);
28314 }
28315 
28316 __extension__ extern __inline uint16x8_t
28317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m(uint16x8_t __inactive,uint16x8_t __a,const int __imm,mve_pred16_t __p)28318 __arm_vrshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28319 {
28320  return __arm_vrshrq_m_n_u16 (__inactive, __a, __imm, __p);
28321 }
28322 
28323 __extension__ extern __inline int8x16_t
28324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(int8x16_t __inactive,int8x16_t __a,const int __imm,mve_pred16_t __p)28325 __arm_vshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28326 {
28327  return __arm_vshlq_m_n_s8 (__inactive, __a, __imm, __p);
28328 }
28329 
28330 __extension__ extern __inline int32x4_t
28331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(int32x4_t __inactive,int32x4_t __a,const int __imm,mve_pred16_t __p)28332 __arm_vshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
28333 {
28334  return __arm_vshlq_m_n_s32 (__inactive, __a, __imm, __p);
28335 }
28336 
28337 __extension__ extern __inline int16x8_t
28338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(int16x8_t __inactive,int16x8_t __a,const int __imm,mve_pred16_t __p)28339 __arm_vshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28340 {
28341  return __arm_vshlq_m_n_s16 (__inactive, __a, __imm, __p);
28342 }
28343 
28344 __extension__ extern __inline uint8x16_t
28345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(uint8x16_t __inactive,uint8x16_t __a,const int __imm,mve_pred16_t __p)28346 __arm_vshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28347 {
28348  return __arm_vshlq_m_n_u8 (__inactive, __a, __imm, __p);
28349 }
28350 
28351 __extension__ extern __inline uint32x4_t
28352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(uint32x4_t __inactive,uint32x4_t __a,const int __imm,mve_pred16_t __p)28353 __arm_vshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
28354 {
28355  return __arm_vshlq_m_n_u32 (__inactive, __a, __imm, __p);
28356 }
28357 
28358 __extension__ extern __inline uint16x8_t
28359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n(uint16x8_t __inactive,uint16x8_t __a,const int __imm,mve_pred16_t __p)28360 __arm_vshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28361 {
28362  return __arm_vshlq_m_n_u16 (__inactive, __a, __imm, __p);
28363 }
28364 
28365 __extension__ extern __inline int8x16_t
28366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(int8x16_t __inactive,int8x16_t __a,const int __imm,mve_pred16_t __p)28367 __arm_vshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28368 {
28369  return __arm_vshrq_m_n_s8 (__inactive, __a, __imm, __p);
28370 }
28371 
28372 __extension__ extern __inline int32x4_t
28373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(int32x4_t __inactive,int32x4_t __a,const int __imm,mve_pred16_t __p)28374 __arm_vshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
28375 {
28376  return __arm_vshrq_m_n_s32 (__inactive, __a, __imm, __p);
28377 }
28378 
28379 __extension__ extern __inline int16x8_t
28380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(int16x8_t __inactive,int16x8_t __a,const int __imm,mve_pred16_t __p)28381 __arm_vshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28382 {
28383  return __arm_vshrq_m_n_s16 (__inactive, __a, __imm, __p);
28384 }
28385 
28386 __extension__ extern __inline uint8x16_t
28387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(uint8x16_t __inactive,uint8x16_t __a,const int __imm,mve_pred16_t __p)28388 __arm_vshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28389 {
28390  return __arm_vshrq_m_n_u8 (__inactive, __a, __imm, __p);
28391 }
28392 
28393 __extension__ extern __inline uint32x4_t
28394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(uint32x4_t __inactive,uint32x4_t __a,const int __imm,mve_pred16_t __p)28395 __arm_vshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
28396 {
28397  return __arm_vshrq_m_n_u32 (__inactive, __a, __imm, __p);
28398 }
28399 
28400 __extension__ extern __inline uint16x8_t
28401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m(uint16x8_t __inactive,uint16x8_t __a,const int __imm,mve_pred16_t __p)28402 __arm_vshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28403 {
28404  return __arm_vshrq_m_n_u16 (__inactive, __a, __imm, __p);
28405 }
28406 
28407 __extension__ extern __inline int8x16_t
28408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(int8x16_t __a,int8x16_t __b,const int __imm,mve_pred16_t __p)28409 __arm_vsliq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
28410 {
28411  return __arm_vsliq_m_n_s8 (__a, __b, __imm, __p);
28412 }
28413 
28414 __extension__ extern __inline int32x4_t
28415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(int32x4_t __a,int32x4_t __b,const int __imm,mve_pred16_t __p)28416 __arm_vsliq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28417 {
28418  return __arm_vsliq_m_n_s32 (__a, __b, __imm, __p);
28419 }
28420 
28421 __extension__ extern __inline int16x8_t
28422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(int16x8_t __a,int16x8_t __b,const int __imm,mve_pred16_t __p)28423 __arm_vsliq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28424 {
28425  return __arm_vsliq_m_n_s16 (__a, __b, __imm, __p);
28426 }
28427 
28428 __extension__ extern __inline uint8x16_t
28429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(uint8x16_t __a,uint8x16_t __b,const int __imm,mve_pred16_t __p)28430 __arm_vsliq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
28431 {
28432  return __arm_vsliq_m_n_u8 (__a, __b, __imm, __p);
28433 }
28434 
28435 __extension__ extern __inline uint32x4_t
28436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(uint32x4_t __a,uint32x4_t __b,const int __imm,mve_pred16_t __p)28437 __arm_vsliq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28438 {
28439  return __arm_vsliq_m_n_u32 (__a, __b, __imm, __p);
28440 }
28441 
28442 __extension__ extern __inline uint16x8_t
28443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m(uint16x8_t __a,uint16x8_t __b,const int __imm,mve_pred16_t __p)28444 __arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28445 {
28446  return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p);
28447 }
28448 
28449 __extension__ extern __inline int8x16_t
28450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(int8x16_t __inactive,int8x16_t __a,int8_t __b,mve_pred16_t __p)28451 __arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
28452 {
28453  return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p);
28454 }
28455 
28456 __extension__ extern __inline int32x4_t
28457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(int32x4_t __inactive,int32x4_t __a,int32_t __b,mve_pred16_t __p)28458 __arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
28459 {
28460  return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p);
28461 }
28462 
28463 __extension__ extern __inline int16x8_t
28464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(int16x8_t __inactive,int16x8_t __a,int16_t __b,mve_pred16_t __p)28465 __arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
28466 {
28467  return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p);
28468 }
28469 
28470 __extension__ extern __inline uint8x16_t
28471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(uint8x16_t __inactive,uint8x16_t __a,uint8_t __b,mve_pred16_t __p)28472 __arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
28473 {
28474  return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p);
28475 }
28476 
28477 __extension__ extern __inline uint32x4_t
28478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(uint32x4_t __inactive,uint32x4_t __a,uint32_t __b,mve_pred16_t __p)28479 __arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
28480 {
28481  return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p);
28482 }
28483 
28484 __extension__ extern __inline uint16x8_t
28485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m(uint16x8_t __inactive,uint16x8_t __a,uint16_t __b,mve_pred16_t __p)28486 __arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
28487 {
28488  return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p);
28489 }
28490 
28491 __extension__ extern __inline int64_t
28492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p(int64_t __a,int32x4_t __b,int32x4_t __c,mve_pred16_t __p)28493 __arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28494 {
28495  return __arm_vmlaldavaq_p_s32 (__a, __b, __c, __p);
28496 }
28497 
28498 __extension__ extern __inline int64_t
28499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p(int64_t __a,int16x8_t __b,int16x8_t __c,mve_pred16_t __p)28500 __arm_vmlaldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
28501 {
28502  return __arm_vmlaldavaq_p_s16 (__a, __b, __c, __p);
28503 }
28504 
28505 __extension__ extern __inline uint64_t
28506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p(uint64_t __a,uint32x4_t __b,uint32x4_t __c,mve_pred16_t __p)28507 __arm_vmlaldavaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
28508 {
28509  return __arm_vmlaldavaq_p_u32 (__a, __b, __c, __p);
28510 }
28511 
28512 __extension__ extern __inline uint64_t
28513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p(uint64_t __a,uint16x8_t __b,uint16x8_t __c,mve_pred16_t __p)28514 __arm_vmlaldavaq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
28515 {
28516  return __arm_vmlaldavaq_p_u16 (__a, __b, __c, __p);
28517 }
28518 
28519 __extension__ extern __inline int64_t
28520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p(int64_t __a,int32x4_t __b,int32x4_t __c,mve_pred16_t __p)28521 __arm_vmlaldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28522 {
28523  return __arm_vmlaldavaxq_p_s32 (__a, __b, __c, __p);
28524 }
28525 
28526 __extension__ extern __inline int64_t
28527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p(int64_t __a,int16x8_t __b,int16x8_t __c,mve_pred16_t __p)28528 __arm_vmlaldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
28529 {
28530  return __arm_vmlaldavaxq_p_s16 (__a, __b, __c, __p);
28531 }
28532 
28533 __extension__ extern __inline int64_t
28534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p(int64_t __a,int32x4_t __b,int32x4_t __c,mve_pred16_t __p)28535 __arm_vmlsldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28536 {
28537  return __arm_vmlsldavaq_p_s32 (__a, __b, __c, __p);
28538 }
28539 
28540 __extension__ extern __inline int64_t
28541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p(int64_t __a,int16x8_t __b,int16x8_t __c,mve_pred16_t __p)28542 __arm_vmlsldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
28543 {
28544  return __arm_vmlsldavaq_p_s16 (__a, __b, __c, __p);
28545 }
28546 
28547 __extension__ extern __inline int64_t
28548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p(int64_t __a,int32x4_t __b,int32x4_t __c,mve_pred16_t __p)28549 __arm_vmlsldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28550 {
28551  return __arm_vmlsldavaxq_p_s32 (__a, __b, __c, __p);
28552 }
28553 
28554 __extension__ extern __inline int64_t
28555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p(int64_t __a,int16x8_t __b,int16x8_t __c,mve_pred16_t __p)28556 __arm_vmlsldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
28557 {
28558  return __arm_vmlsldavaxq_p_s16 (__a, __b, __c, __p);
28559 }
28560 
28561 __extension__ extern __inline uint16x8_t
28562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m(uint16x8_t __inactive,uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)28563 __arm_vmullbq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
28564 {
28565  return __arm_vmullbq_poly_m_p8 (__inactive, __a, __b, __p);
28566 }
28567 
28568 __extension__ extern __inline uint32x4_t
28569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28570 __arm_vmullbq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
28571 {
28572  return __arm_vmullbq_poly_m_p16 (__inactive, __a, __b, __p);
28573 }
28574 
28575 __extension__ extern __inline uint16x8_t
28576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28577 __arm_vmulltq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
28578 {
28579  return __arm_vmulltq_poly_m_p8 (__inactive, __a, __b, __p);
28580 }
28581 
28582 __extension__ extern __inline uint32x4_t
28583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28584 __arm_vmulltq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
28585 {
28586  return __arm_vmulltq_poly_m_p16 (__inactive, __a, __b, __p);
28587 }
28588 
28589 __extension__ extern __inline int64x2_t
28590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28591 __arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
28592 {
28593  return __arm_vqdmullbq_m_n_s32 (__inactive, __a, __b, __p);
28594 }
28595 
28596 __extension__ extern __inline int32x4_t
28597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28598 __arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
28599 {
28600  return __arm_vqdmullbq_m_n_s16 (__inactive, __a, __b, __p);
28601 }
28602 
28603 __extension__ extern __inline int64x2_t
28604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28605 __arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28606 {
28607  return __arm_vqdmullbq_m_s32 (__inactive, __a, __b, __p);
28608 }
28609 
28610 __extension__ extern __inline int32x4_t
28611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28612 __arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28613 {
28614  return __arm_vqdmullbq_m_s16 (__inactive, __a, __b, __p);
28615 }
28616 
28617 __extension__ extern __inline int64x2_t
28618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28619 __arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
28620 {
28621  return __arm_vqdmulltq_m_n_s32 (__inactive, __a, __b, __p);
28622 }
28623 
28624 __extension__ extern __inline int32x4_t
28625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28626 __arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
28627 {
28628  return __arm_vqdmulltq_m_n_s16 (__inactive, __a, __b, __p);
28629 }
28630 
28631 __extension__ extern __inline int64x2_t
28632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28633 __arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
28634 {
28635  return __arm_vqdmulltq_m_s32 (__inactive, __a, __b, __p);
28636 }
28637 
28638 __extension__ extern __inline int32x4_t
28639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28640 __arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
28641 {
28642  return __arm_vqdmulltq_m_s16 (__inactive, __a, __b, __p);
28643 }
28644 
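/* Usage sketch (not part of the GCC header): the _m ("merging")
   overloads above write a result lane only where the predicate is
   set and copy the corresponding lane of __inactive otherwise.  Here
   the bottom int16_t lanes are widened with a saturating doubling
   multiply; inactive lanes keep their previous 32-bit values.  The
   unprefixed vqdmullbq_m alias is assumed to be defined earlier in
   this header.  */
#if 0 /* illustration only */
static inline int32x4_t
__example_qdmull_low_masked (int32x4_t prev, int16x8_t a, int16x8_t b,
                             mve_pred16_t p)
{
  return vqdmullbq_m (prev, a, b, p);
}
#endif
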
28645 __extension__ extern __inline int16x8_t
28646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28647 __arm_vqrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28648 {
28649  return __arm_vqrshrnbq_m_n_s32 (__a, __b, __imm, __p);
28650 }
28651 
28652 __extension__ extern __inline int8x16_t
28653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28654 __arm_vqrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28655 {
28656  return __arm_vqrshrnbq_m_n_s16 (__a, __b, __imm, __p);
28657 }
28658 
28659 __extension__ extern __inline uint16x8_t
28660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28661 __arm_vqrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28662 {
28663  return __arm_vqrshrnbq_m_n_u32 (__a, __b, __imm, __p);
28664 }
28665 
28666 __extension__ extern __inline uint8x16_t
28667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28668 __arm_vqrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28669 {
28670  return __arm_vqrshrnbq_m_n_u16 (__a, __b, __imm, __p);
28671 }
28672 
28673 __extension__ extern __inline int16x8_t
28674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28675 __arm_vqrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28676 {
28677  return __arm_vqrshrntq_m_n_s32 (__a, __b, __imm, __p);
28678 }
28679 
28680 __extension__ extern __inline int8x16_t
28681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28682 __arm_vqrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28683 {
28684  return __arm_vqrshrntq_m_n_s16 (__a, __b, __imm, __p);
28685 }
28686 
28687 __extension__ extern __inline uint16x8_t
28688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28689 __arm_vqrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28690 {
28691  return __arm_vqrshrntq_m_n_u32 (__a, __b, __imm, __p);
28692 }
28693 
28694 __extension__ extern __inline uint8x16_t
28695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28696 __arm_vqrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28697 {
28698  return __arm_vqrshrntq_m_n_u16 (__a, __b, __imm, __p);
28699 }
28700 
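/* Usage sketch (not part of the GCC header): the bottom/top rounding
   saturating narrowing shifts above can be paired to pack two
   int32x4_t vectors into one int16x8_t, with __a supplying the lanes
   the predicate leaves untouched.  The shift count must be a
   compile-time constant valid for the narrow element size; the
   unprefixed aliases are assumed to be defined earlier in this
   header.  */
#if 0 /* illustration only */
static inline int16x8_t
__example_pack_q15 (int16x8_t dst, int32x4_t lo, int32x4_t hi,
                    mve_pred16_t p)
{
  dst = vqrshrnbq_m (dst, lo, 1, p);   /* fill the even (bottom) lanes */
  dst = vqrshrntq_m (dst, hi, 1, p);   /* fill the odd (top) lanes */
  return dst;
}
#endif
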
28701 __extension__ extern __inline uint16x8_t
28702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28703 __arm_vqrshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28704 {
28705  return __arm_vqrshrunbq_m_n_s32 (__a, __b, __imm, __p);
28706 }
28707 
28708 __extension__ extern __inline uint8x16_t
28709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28710 __arm_vqrshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28711 {
28712  return __arm_vqrshrunbq_m_n_s16 (__a, __b, __imm, __p);
28713 }
28714 
28715 __extension__ extern __inline uint16x8_t
28716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28717 __arm_vqrshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28718 {
28719  return __arm_vqrshruntq_m_n_s32 (__a, __b, __imm, __p);
28720 }
28721 
28722 __extension__ extern __inline uint8x16_t
28723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28724 __arm_vqrshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28725 {
28726  return __arm_vqrshruntq_m_n_s16 (__a, __b, __imm, __p);
28727 }
28728 
28729 __extension__ extern __inline int16x8_t
28730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28731 __arm_vqshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28732 {
28733  return __arm_vqshrnbq_m_n_s32 (__a, __b, __imm, __p);
28734 }
28735 
28736 __extension__ extern __inline int8x16_t
28737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28738 __arm_vqshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28739 {
28740  return __arm_vqshrnbq_m_n_s16 (__a, __b, __imm, __p);
28741 }
28742 
28743 __extension__ extern __inline uint16x8_t
28744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28745 __arm_vqshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28746 {
28747  return __arm_vqshrnbq_m_n_u32 (__a, __b, __imm, __p);
28748 }
28749 
28750 __extension__ extern __inline uint8x16_t
28751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28752 __arm_vqshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28753 {
28754  return __arm_vqshrnbq_m_n_u16 (__a, __b, __imm, __p);
28755 }
28756 
28757 __extension__ extern __inline int16x8_t
28758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28759 __arm_vqshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28760 {
28761  return __arm_vqshrntq_m_n_s32 (__a, __b, __imm, __p);
28762 }
28763 
28764 __extension__ extern __inline int8x16_t
28765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28766 __arm_vqshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28767 {
28768  return __arm_vqshrntq_m_n_s16 (__a, __b, __imm, __p);
28769 }
28770 
28771 __extension__ extern __inline uint16x8_t
28772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28773 __arm_vqshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28774 {
28775  return __arm_vqshrntq_m_n_u32 (__a, __b, __imm, __p);
28776 }
28777 
28778 __extension__ extern __inline uint8x16_t
28779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28780 __arm_vqshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28781 {
28782  return __arm_vqshrntq_m_n_u16 (__a, __b, __imm, __p);
28783 }
28784 
28785 __extension__ extern __inline uint16x8_t
28786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28787 __arm_vqshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28788 {
28789  return __arm_vqshrunbq_m_n_s32 (__a, __b, __imm, __p);
28790 }
28791 
28792 __extension__ extern __inline uint8x16_t
28793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28794 __arm_vqshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28795 {
28796  return __arm_vqshrunbq_m_n_s16 (__a, __b, __imm, __p);
28797 }
28798 
28799 __extension__ extern __inline uint16x8_t
28800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28801 __arm_vqshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28802 {
28803  return __arm_vqshruntq_m_n_s32 (__a, __b, __imm, __p);
28804 }
28805 
28806 __extension__ extern __inline uint8x16_t
28807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28808 __arm_vqshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28809 {
28810  return __arm_vqshruntq_m_n_s16 (__a, __b, __imm, __p);
28811 }
28812 
28813 __extension__ extern __inline int64_t
28814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28815 __arm_vrmlaldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28816 {
28817  return __arm_vrmlaldavhaq_p_s32 (__a, __b, __c, __p);
28818 }
28819 
28820 __extension__ extern __inline uint64_t
28821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28822 __arm_vrmlaldavhaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
28823 {
28824  return __arm_vrmlaldavhaq_p_u32 (__a, __b, __c, __p);
28825 }
28826 
28827 __extension__ extern __inline int64_t
28828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28829 __arm_vrmlaldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28830 {
28831  return __arm_vrmlaldavhaxq_p_s32 (__a, __b, __c, __p);
28832 }
28833 
28834 __extension__ extern __inline int64_t
28835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28836 __arm_vrmlsldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28837 {
28838  return __arm_vrmlsldavhaq_p_s32 (__a, __b, __c, __p);
28839 }
28840 
28841 __extension__ extern __inline int64_t
28842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28843 __arm_vrmlsldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28844 {
28845  return __arm_vrmlsldavhaxq_p_s32 (__a, __b, __c, __p);
28846 }
28847 
28848 __extension__ extern __inline int16x8_t
28849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28850 __arm_vrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28851 {
28852  return __arm_vrshrnbq_m_n_s32 (__a, __b, __imm, __p);
28853 }
28854 
28855 __extension__ extern __inline int8x16_t
28856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28857 __arm_vrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28858 {
28859  return __arm_vrshrnbq_m_n_s16 (__a, __b, __imm, __p);
28860 }
28861 
28862 __extension__ extern __inline uint16x8_t
28863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28864 __arm_vrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28865 {
28866  return __arm_vrshrnbq_m_n_u32 (__a, __b, __imm, __p);
28867 }
28868 
28869 __extension__ extern __inline uint8x16_t
28870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28871 __arm_vrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28872 {
28873  return __arm_vrshrnbq_m_n_u16 (__a, __b, __imm, __p);
28874 }
28875 
28876 __extension__ extern __inline int16x8_t
28877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28878 __arm_vrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28879 {
28880  return __arm_vrshrntq_m_n_s32 (__a, __b, __imm, __p);
28881 }
28882 
28883 __extension__ extern __inline int8x16_t
28884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28885 __arm_vrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28886 {
28887  return __arm_vrshrntq_m_n_s16 (__a, __b, __imm, __p);
28888 }
28889 
28890 __extension__ extern __inline uint16x8_t
28891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28892 __arm_vrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28893 {
28894  return __arm_vrshrntq_m_n_u32 (__a, __b, __imm, __p);
28895 }
28896 
28897 __extension__ extern __inline uint8x16_t
28898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28899 __arm_vrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28900 {
28901  return __arm_vrshrntq_m_n_u16 (__a, __b, __imm, __p);
28902 }
28903 
28904 __extension__ extern __inline int16x8_t
28905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28906 __arm_vshllbq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28907 {
28908  return __arm_vshllbq_m_n_s8 (__inactive, __a, __imm, __p);
28909 }
28910 
28911 __extension__ extern __inline int32x4_t
28912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28913 __arm_vshllbq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28914 {
28915  return __arm_vshllbq_m_n_s16 (__inactive, __a, __imm, __p);
28916 }
28917 
28918 __extension__ extern __inline uint16x8_t
28919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28920 __arm_vshllbq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28921 {
28922  return __arm_vshllbq_m_n_u8 (__inactive, __a, __imm, __p);
28923 }
28924 
28925 __extension__ extern __inline uint32x4_t
28926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28927 __arm_vshllbq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28928 {
28929  return __arm_vshllbq_m_n_u16 (__inactive, __a, __imm, __p);
28930 }
28931 
28932 __extension__ extern __inline int16x8_t
28933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28934 __arm_vshlltq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28935 {
28936  return __arm_vshlltq_m_n_s8 (__inactive, __a, __imm, __p);
28937 }
28938 
28939 __extension__ extern __inline int32x4_t
28940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28941 __arm_vshlltq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28942 {
28943  return __arm_vshlltq_m_n_s16 (__inactive, __a, __imm, __p);
28944 }
28945 
28946 __extension__ extern __inline uint16x8_t
28947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28948 __arm_vshlltq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28949 {
28950  return __arm_vshlltq_m_n_u8 (__inactive, __a, __imm, __p);
28951 }
28952 
28953 __extension__ extern __inline uint32x4_t
28954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28955 __arm_vshlltq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28956 {
28957  return __arm_vshlltq_m_n_u16 (__inactive, __a, __imm, __p);
28958 }
28959 
28960 __extension__ extern __inline int16x8_t
28961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28962 __arm_vshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28963 {
28964  return __arm_vshrnbq_m_n_s32 (__a, __b, __imm, __p);
28965 }
28966 
28967 __extension__ extern __inline int8x16_t
28968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28969 __arm_vshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28970 {
28971  return __arm_vshrnbq_m_n_s16 (__a, __b, __imm, __p);
28972 }
28973 
28974 __extension__ extern __inline uint16x8_t
28975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28976 __arm_vshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28977 {
28978  return __arm_vshrnbq_m_n_u32 (__a, __b, __imm, __p);
28979 }
28980 
28981 __extension__ extern __inline uint8x16_t
28982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28983 __arm_vshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28984 {
28985  return __arm_vshrnbq_m_n_u16 (__a, __b, __imm, __p);
28986 }
28987 
28988 __extension__ extern __inline int16x8_t
28989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28990 __arm_vshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28991 {
28992  return __arm_vshrntq_m_n_s32 (__a, __b, __imm, __p);
28993 }
28994 
28995 __extension__ extern __inline int8x16_t
28996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28997 __arm_vshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28998 {
28999  return __arm_vshrntq_m_n_s16 (__a, __b, __imm, __p);
29000 }
29001 
29002 __extension__ extern __inline uint16x8_t
29003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29004 __arm_vshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
29005 {
29006  return __arm_vshrntq_m_n_u32 (__a, __b, __imm, __p);
29007 }
29008 
29009 __extension__ extern __inline uint8x16_t
29010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29011 __arm_vshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
29012 {
29013  return __arm_vshrntq_m_n_u16 (__a, __b, __imm, __p);
29014 }
29015 
29016 __extension__ extern __inline void
29017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29018 __arm_vstrbq_scatter_offset (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
29019 {
29020  __arm_vstrbq_scatter_offset_s8 (__base, __offset, __value);
29021 }
29022 
29023 __extension__ extern __inline void
29024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29025 __arm_vstrbq_scatter_offset (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
29026 {
29027  __arm_vstrbq_scatter_offset_s32 (__base, __offset, __value);
29028 }
29029 
29030 __extension__ extern __inline void
29031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29032 __arm_vstrbq_scatter_offset (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
29033 {
29034  __arm_vstrbq_scatter_offset_s16 (__base, __offset, __value);
29035 }
29036 
29037 __extension__ extern __inline void
29038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29039 __arm_vstrbq_scatter_offset (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
29040 {
29041  __arm_vstrbq_scatter_offset_u8 (__base, __offset, __value);
29042 }
29043 
29044 __extension__ extern __inline void
29045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29046 __arm_vstrbq_scatter_offset (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
29047 {
29048  __arm_vstrbq_scatter_offset_u32 (__base, __offset, __value);
29049 }
29050 
29051 __extension__ extern __inline void
29052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29053 __arm_vstrbq_scatter_offset (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
29054 {
29055  __arm_vstrbq_scatter_offset_u16 (__base, __offset, __value);
29056 }
29057 
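/* Usage sketch (not part of the GCC header): the scatter-store
   overloads above truncate each lane of __value to a byte and store
   it at __base[__offset[i]]; with the uint32x4_t overload, four
   bytes land at arbitrary (unscaled) byte offsets from __base.  The
   unprefixed vstrbq_scatter_offset alias is assumed to be defined
   earlier in this header.  */
#if 0 /* illustration only */
static inline void
__example_scatter_bytes (uint8_t *base, uint32x4_t offsets, uint32x4_t val)
{
  vstrbq_scatter_offset (base, offsets, val);
}
#endif
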
29058 __extension__ extern __inline void
29059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29060 __arm_vstrbq (int8_t * __addr, int8x16_t __value)
29061 {
29062  __arm_vstrbq_s8 (__addr, __value);
29063 }
29064 
29065 __extension__ extern __inline void
29066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29067 __arm_vstrbq (int8_t * __addr, int32x4_t __value)
29068 {
29069  __arm_vstrbq_s32 (__addr, __value);
29070 }
29071 
29072 __extension__ extern __inline void
29073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29074 __arm_vstrbq (int8_t * __addr, int16x8_t __value)
29075 {
29076  __arm_vstrbq_s16 (__addr, __value);
29077 }
29078 
29079 __extension__ extern __inline void
29080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29081 __arm_vstrbq (uint8_t * __addr, uint8x16_t __value)
29082 {
29083  __arm_vstrbq_u8 (__addr, __value);
29084 }
29085 
29086 __extension__ extern __inline void
29087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29088 __arm_vstrbq (uint8_t * __addr, uint32x4_t __value)
29089 {
29090  __arm_vstrbq_u32 (__addr, __value);
29091 }
29092 
29093 __extension__ extern __inline void
29094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29095 __arm_vstrbq (uint8_t * __addr, uint16x8_t __value)
29096 {
29097  __arm_vstrbq_u16 (__addr, __value);
29098 }
29099 
29100 __extension__ extern __inline void
29101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29102 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, int32x4_t __value)
29103 {
29104  __arm_vstrwq_scatter_base_s32 (__addr, __offset, __value);
29105 }
29106 
29107 __extension__ extern __inline void
29108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29109 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, uint32x4_t __value)
29110 {
29111  __arm_vstrwq_scatter_base_u32 (__addr, __offset, __value);
29112 }
29113 
29114 __extension__ extern __inline uint8x16_t
29115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29116 __arm_vldrbq_gather_offset (uint8_t const * __base, uint8x16_t __offset)
29117 {
29118  return __arm_vldrbq_gather_offset_u8 (__base, __offset);
29119 }
29120 
29121 __extension__ extern __inline int8x16_t
29122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29123 __arm_vldrbq_gather_offset (int8_t const * __base, uint8x16_t __offset)
29124 {
29125  return __arm_vldrbq_gather_offset_s8 (__base, __offset);
29126 }
29127 
29128 __extension__ extern __inline uint16x8_t
29129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29130 __arm_vldrbq_gather_offset (uint8_t const * __base, uint16x8_t __offset)
29131 {
29132  return __arm_vldrbq_gather_offset_u16 (__base, __offset);
29133 }
29134 
29135 __extension__ extern __inline int16x8_t
29136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29137 __arm_vldrbq_gather_offset (int8_t const * __base, uint16x8_t __offset)
29138 {
29139  return __arm_vldrbq_gather_offset_s16 (__base, __offset);
29140 }
29141 
29142 __extension__ extern __inline uint32x4_t
29143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29144 __arm_vldrbq_gather_offset (uint8_t const * __base, uint32x4_t __offset)
29145 {
29146  return __arm_vldrbq_gather_offset_u32 (__base, __offset);
29147 }
29148 
29149 __extension__ extern __inline int32x4_t
29150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29151 __arm_vldrbq_gather_offset (int8_t const * __base, uint32x4_t __offset)
29152 {
29153  return __arm_vldrbq_gather_offset_s32 (__base, __offset);
29154 }
29155 
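/* Usage sketch (not part of the GCC header): the gather overloads
   above load __base[__offset[i]] for each lane and widen the byte to
   the lane size, zero-extending for the unsigned overloads and
   sign-extending for the signed ones.  The unprefixed
   vldrbq_gather_offset alias is assumed to be defined earlier in
   this header.  */
#if 0 /* illustration only */
static inline uint32x4_t
__example_gather_bytes (const uint8_t *base, uint32x4_t offsets)
{
  return vldrbq_gather_offset (base, offsets);
}
#endif
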
29156 __extension__ extern __inline void
29157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29158 __arm_vstrbq_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
29159 {
29160  __arm_vstrbq_p_s8 (__addr, __value, __p);
29161 }
29162 
29163 __extension__ extern __inline void
29164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29165 __arm_vstrbq_p (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
29166 {
29167  __arm_vstrbq_p_s32 (__addr, __value, __p);
29168 }
29169 
29170 __extension__ extern __inline void
29171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29172 __arm_vstrbq_p (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
29173 {
29174  __arm_vstrbq_p_s16 (__addr, __value, __p);
29175 }
29176 
29177 __extension__ extern __inline void
29178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29179 __arm_vstrbq_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
29180 {
29181  __arm_vstrbq_p_u8 (__addr, __value, __p);
29182 }
29183 
29184 __extension__ extern __inline void
29185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29186 __arm_vstrbq_p (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
29187 {
29188  __arm_vstrbq_p_u32 (__addr, __value, __p);
29189 }
29190 
29191 __extension__ extern __inline void
29192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29193 __arm_vstrbq_p (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
29194 {
29195  __arm_vstrbq_p_u16 (__addr, __value, __p);
29196 }
29197 
29198 __extension__ extern __inline void
29199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29200 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
29201 {
29202  __arm_vstrbq_scatter_offset_p_s8 (__base, __offset, __value, __p);
29203 }
29204 
29205 __extension__ extern __inline void
29206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29207 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29208 {
29209  __arm_vstrbq_scatter_offset_p_s32 (__base, __offset, __value, __p);
29210 }
29211 
29212 __extension__ extern __inline void
29213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29214 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29215 {
29216  __arm_vstrbq_scatter_offset_p_s16 (__base, __offset, __value, __p);
29217 }
29218 
29219 __extension__ extern __inline void
29220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29221 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
29222 {
29223  __arm_vstrbq_scatter_offset_p_u8 (__base, __offset, __value, __p);
29224 }
29225 
29226 __extension__ extern __inline void
29227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29228 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29229 {
29230  __arm_vstrbq_scatter_offset_p_u32 (__base, __offset, __value, __p);
29231 }
29232 
29233 __extension__ extern __inline void
29234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29235 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29236 {
29237  __arm_vstrbq_scatter_offset_p_u16 (__base, __offset, __value, __p);
29238 }
29239 
29240 __extension__ extern __inline void
29241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29242 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
29243 {
29244  __arm_vstrwq_scatter_base_p_s32 (__addr, __offset, __value, __p);
29245 }
29246 
29247 __extension__ extern __inline void
29248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29249 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
29250 {
29251  __arm_vstrwq_scatter_base_p_u32 (__addr, __offset, __value, __p);
29252 }
29253 
29254 __extension__ extern __inline int8x16_t
29255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29256 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
29257 {
29258  return __arm_vldrbq_gather_offset_z_s8 (__base, __offset, __p);
29259 }
29260 
29261 __extension__ extern __inline int32x4_t
29262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29263 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29264 {
29265  return __arm_vldrbq_gather_offset_z_s32 (__base, __offset, __p);
29266 }
29267 
29268 __extension__ extern __inline int16x8_t
29269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29270 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29271 {
29272  return __arm_vldrbq_gather_offset_z_s16 (__base, __offset, __p);
29273 }
29274 
29275 __extension__ extern __inline uint8x16_t
29276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29277 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
29278 {
29279  return __arm_vldrbq_gather_offset_z_u8 (__base, __offset, __p);
29280 }
29281 
29282 __extension__ extern __inline uint32x4_t
29283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29284 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29285 {
29286  return __arm_vldrbq_gather_offset_z_u32 (__base, __offset, __p);
29287 }
29288 
29289 __extension__ extern __inline uint16x8_t
29290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29291 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29292 {
29293  return __arm_vldrbq_gather_offset_z_u16 (__base, __offset, __p);
29294 }
29295 
29296 __extension__ extern __inline int8x16_t
29297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29298 __arm_vld1q (int8_t const * __base)
29299 {
29300  return __arm_vld1q_s8 (__base);
29301 }
29302 
29303 __extension__ extern __inline int32x4_t
29304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29305 __arm_vld1q (int32_t const * __base)
29306 {
29307  return __arm_vld1q_s32 (__base);
29308 }
29309 
29310 __extension__ extern __inline int16x8_t
29311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29312 __arm_vld1q (int16_t const * __base)
29313 {
29314  return __arm_vld1q_s16 (__base);
29315 }
29316 
29317 __extension__ extern __inline uint8x16_t
29318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29319 __arm_vld1q (uint8_t const * __base)
29320 {
29321  return __arm_vld1q_u8 (__base);
29322 }
29323 
29324 __extension__ extern __inline uint32x4_t
29325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29326 __arm_vld1q (uint32_t const * __base)
29327 {
29328  return __arm_vld1q_u32 (__base);
29329 }
29330 
29331 __extension__ extern __inline uint16x8_t
29332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29333 __arm_vld1q (uint16_t const * __base)
29334 {
29335  return __arm_vld1q_u16 (__base);
29336 }
29337 
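/* Usage sketch (not part of the GCC header): vld1q resolves the
   element type from its pointer argument, so one spelling loads any
   of the vector types above; vst1q, defined further down, is the
   matching polymorphic contiguous store.  The unprefixed aliases are
   assumed to be defined earlier in this header.  */
#if 0 /* illustration only */
static inline void
__example_copy_one_vector (const uint32_t *src, uint32_t *dst)
{
  uint32x4_t v = vld1q (src);   /* dispatches to __arm_vld1q_u32 */
  vst1q (dst, v);               /* dispatches to __arm_vst1q_u32 */
}
#endif
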
29338 __extension__ extern __inline int32x4_t
29339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29340 __arm_vldrhq_gather_offset (int16_t const * __base, uint32x4_t __offset)
29341 {
29342  return __arm_vldrhq_gather_offset_s32 (__base, __offset);
29343 }
29344 
29345 __extension__ extern __inline int16x8_t
29346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29347 __arm_vldrhq_gather_offset (int16_t const * __base, uint16x8_t __offset)
29348 {
29349  return __arm_vldrhq_gather_offset_s16 (__base, __offset);
29350 }
29351 
29352 __extension__ extern __inline uint32x4_t
29353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29354 __arm_vldrhq_gather_offset (uint16_t const * __base, uint32x4_t __offset)
29355 {
29356  return __arm_vldrhq_gather_offset_u32 (__base, __offset);
29357 }
29358 
29359 __extension__ extern __inline uint16x8_t
29360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29361 __arm_vldrhq_gather_offset (uint16_t const * __base, uint16x8_t __offset)
29362 {
29363  return __arm_vldrhq_gather_offset_u16 (__base, __offset);
29364 }
29365 
29366 __extension__ extern __inline int32x4_t
29367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29368 __arm_vldrhq_gather_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29369 {
29370  return __arm_vldrhq_gather_offset_z_s32 (__base, __offset, __p);
29371 }
29372 
29373 __extension__ extern __inline int16x8_t
29374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29375 __arm_vldrhq_gather_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29376 {
29377  return __arm_vldrhq_gather_offset_z_s16 (__base, __offset, __p);
29378 }
29379 
29380 __extension__ extern __inline uint32x4_t
29381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29382 __arm_vldrhq_gather_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29383 {
29384  return __arm_vldrhq_gather_offset_z_u32 (__base, __offset, __p);
29385 }
29386 
29387 __extension__ extern __inline uint16x8_t
29388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29389 __arm_vldrhq_gather_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29390 {
29391  return __arm_vldrhq_gather_offset_z_u16 (__base, __offset, __p);
29392 }
29393 
29394 __extension__ extern __inline int32x4_t
29395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29396 __arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset)
29397 {
29398  return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset);
29399 }
29400 
29401 __extension__ extern __inline int16x8_t
29402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29403 __arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset)
29404 {
29405  return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset);
29406 }
29407 
29408 __extension__ extern __inline uint32x4_t
29409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29410 __arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset)
29411 {
29412  return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset);
29413 }
29414 
29415 __extension__ extern __inline uint16x8_t
29416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29417 __arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset)
29418 {
29419  return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset);
29420 }
29421 
29422 __extension__ extern __inline int32x4_t
29423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29424 __arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29425 {
29426  return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p);
29427 }
29428 
29429 __extension__ extern __inline int16x8_t
29430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29431 __arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29432 {
29433  return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p);
29434 }
29435 
29436 __extension__ extern __inline uint32x4_t
29437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29438 __arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29439 {
29440  return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p);
29441 }
29442 
29443 __extension__ extern __inline uint16x8_t
29444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29445 __arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29446 {
29447  return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p);
29448 }
29449 
29450 __extension__ extern __inline int64x2_t
29451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29452 __arm_vldrdq_gather_offset (int64_t const * __base, uint64x2_t __offset)
29453 {
29454  return __arm_vldrdq_gather_offset_s64 (__base, __offset);
29455 }
29456 
29457 __extension__ extern __inline uint64x2_t
29458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29459 __arm_vldrdq_gather_offset (uint64_t const * __base, uint64x2_t __offset)
29460 {
29461  return __arm_vldrdq_gather_offset_u64 (__base, __offset);
29462 }
29463 
29464 __extension__ extern __inline int64x2_t
29465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29466 __arm_vldrdq_gather_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29467 {
29468  return __arm_vldrdq_gather_offset_z_s64 (__base, __offset, __p);
29469 }
29470 
29471 __extension__ extern __inline uint64x2_t
29472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29473 __arm_vldrdq_gather_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29474 {
29475  return __arm_vldrdq_gather_offset_z_u64 (__base, __offset, __p);
29476 }
29477 
29478 __extension__ extern __inline int64x2_t
29479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29480 __arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset)
29481 {
29482  return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset);
29483 }
29484 
29485 __extension__ extern __inline uint64x2_t
29486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29487 __arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset)
29488 {
29489  return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset);
29490 }
29491 
29492 __extension__ extern __inline int64x2_t
29493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29494 __arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29495 {
29496  return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p);
29497 }
29498 
29499 __extension__ extern __inline uint64x2_t
29500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29501 __arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29502 {
29503  return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p);
29504 }
29505 
29506 __extension__ extern __inline int32x4_t
29507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29508 __arm_vldrwq_gather_offset (int32_t const * __base, uint32x4_t __offset)
29509 {
29510  return __arm_vldrwq_gather_offset_s32 (__base, __offset);
29511 }
29512 
29513 __extension__ extern __inline uint32x4_t
29514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29515 __arm_vldrwq_gather_offset (uint32_t const * __base, uint32x4_t __offset)
29516 {
29517  return __arm_vldrwq_gather_offset_u32 (__base, __offset);
29518 }
29519 
29520 __extension__ extern __inline int32x4_t
29521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29522 __arm_vldrwq_gather_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29523 {
29524  return __arm_vldrwq_gather_offset_z_s32 (__base, __offset, __p);
29525 }
29526 
29527 __extension__ extern __inline uint32x4_t
29528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29529 __arm_vldrwq_gather_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29530 {
29531  return __arm_vldrwq_gather_offset_z_u32 (__base, __offset, __p);
29532 }
29533 
29534 __extension__ extern __inline int32x4_t
29535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29536 __arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset)
29537 {
29538  return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset);
29539 }
29540 
29541 __extension__ extern __inline uint32x4_t
29542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29543 __arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset)
29544 {
29545  return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset);
29546 }
29547 
29548 __extension__ extern __inline int32x4_t
29549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29550 __arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29551 {
29552  return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p);
29553 }
29554 
29555 __extension__ extern __inline uint32x4_t
29556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29557 __arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29558 {
29559  return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p);
29560 }
29561 
29562 __extension__ extern __inline void
29563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29564 __arm_vst1q (int8_t * __addr, int8x16_t __value)
29565 {
29566  __arm_vst1q_s8 (__addr, __value);
29567 }
29568 
29569 __extension__ extern __inline void
29570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29571 __arm_vst1q (int32_t * __addr, int32x4_t __value)
29572 {
29573  __arm_vst1q_s32 (__addr, __value);
29574 }
29575 
29576 __extension__ extern __inline void
29577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29578 __arm_vst1q (int16_t * __addr, int16x8_t __value)
29579 {
29580  __arm_vst1q_s16 (__addr, __value);
29581 }
29582 
29583 __extension__ extern __inline void
29584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29585 __arm_vst1q (uint8_t * __addr, uint8x16_t __value)
29586 {
29587  __arm_vst1q_u8 (__addr, __value);
29588 }
29589 
29590 __extension__ extern __inline void
29591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29592 __arm_vst1q (uint32_t * __addr, uint32x4_t __value)
29593 {
29594  __arm_vst1q_u32 (__addr, __value);
29595 }
29596 
29597 __extension__ extern __inline void
29598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29599 __arm_vst1q (uint16_t * __addr, uint16x8_t __value)
29600 {
29601  __arm_vst1q_u16 (__addr, __value);
29602 }
29603 
29604 __extension__ extern __inline void
29605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29606 __arm_vstrhq_scatter_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
29607 {
29608  __arm_vstrhq_scatter_offset_s32 (__base, __offset, __value);
29609 }
29610 
29611 __extension__ extern __inline void
29612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29613 __arm_vstrhq_scatter_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
29614 {
29615  __arm_vstrhq_scatter_offset_s16 (__base, __offset, __value);
29616 }
29617 
29618 __extension__ extern __inline void
29619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29620 __arm_vstrhq_scatter_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
29621 {
29622  __arm_vstrhq_scatter_offset_u32 (__base, __offset, __value);
29623 }
29624 
29625 __extension__ extern __inline void
29626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29627 __arm_vstrhq_scatter_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
29628 {
29629  __arm_vstrhq_scatter_offset_u16 (__base, __offset, __value);
29630 }
29631 
29632 __extension__ extern __inline void
29633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29634 __arm_vstrhq_scatter_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29635 {
29636  __arm_vstrhq_scatter_offset_p_s32 (__base, __offset, __value, __p);
29637 }
29638 
29639 __extension__ extern __inline void
29640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29641 __arm_vstrhq_scatter_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29642 {
29643  __arm_vstrhq_scatter_offset_p_s16 (__base, __offset, __value, __p);
29644 }
29645 
29646 __extension__ extern __inline void
29647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p(uint16_t * __base,uint32x4_t __offset,uint32x4_t __value,mve_pred16_t __p)29648 __arm_vstrhq_scatter_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29649 {
29650  __arm_vstrhq_scatter_offset_p_u32 (__base, __offset, __value, __p);
29651 }
29652 
29653 __extension__ extern __inline void
29654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p(uint16_t * __base,uint16x8_t __offset,uint16x8_t __value,mve_pred16_t __p)29655 __arm_vstrhq_scatter_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29656 {
29657  __arm_vstrhq_scatter_offset_p_u16 (__base, __offset, __value, __p);
29658 }
29659 
29660 __extension__ extern __inline void
29661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset(int16_t * __base,uint32x4_t __offset,int32x4_t __value)29662 __arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
29663 {
29664  __arm_vstrhq_scatter_shifted_offset_s32 (__base, __offset, __value);
29665 }
29666 
29667 __extension__ extern __inline void
29668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset(int16_t * __base,uint16x8_t __offset,int16x8_t __value)29669 __arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
29670 {
29671  __arm_vstrhq_scatter_shifted_offset_s16 (__base, __offset, __value);
29672 }
29673 
29674 __extension__ extern __inline void
29675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset(uint16_t * __base,uint32x4_t __offset,uint32x4_t __value)29676 __arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
29677 {
29678  __arm_vstrhq_scatter_shifted_offset_u32 (__base, __offset, __value);
29679 }
29680 
29681 __extension__ extern __inline void
29682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset(uint16_t * __base,uint16x8_t __offset,uint16x8_t __value)29683 __arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
29684 {
29685  __arm_vstrhq_scatter_shifted_offset_u16 (__base, __offset, __value);
29686 }
29687 
29688 __extension__ extern __inline void
29689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p(int16_t * __base,uint32x4_t __offset,int32x4_t __value,mve_pred16_t __p)29690 __arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29691 {
29692  __arm_vstrhq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
29693 }
29694 
29695 __extension__ extern __inline void
29696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p(int16_t * __base,uint16x8_t __offset,int16x8_t __value,mve_pred16_t __p)29697 __arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29698 {
29699  __arm_vstrhq_scatter_shifted_offset_p_s16 (__base, __offset, __value, __p);
29700 }
29701 
29702 __extension__ extern __inline void
29703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p(uint16_t * __base,uint32x4_t __offset,uint32x4_t __value,mve_pred16_t __p)29704 __arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29705 {
29706  __arm_vstrhq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
29707 }
29708 
29709 __extension__ extern __inline void
29710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p(uint16_t * __base,uint16x8_t __offset,uint16x8_t __value,mve_pred16_t __p)29711 __arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29712 {
29713  __arm_vstrhq_scatter_shifted_offset_p_u16 (__base, __offset, __value, __p);
29714 }
29715 
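/* Polymorphic contiguous halfword stores.  vstrhq writes the elements of
   __value to successive halfwords at __addr, truncating 32-bit elements to
   16 bits for the int32x4_t/uint32x4_t overloads; vstrhq_p does the same for
   predicate-selected lanes only.  Illustrative sketch (an assumption, not
   part of the original header), given a caller-provided buffer `buf16' of at
   least four int16_t:

     int32x4_t acc = vdupq_n_s32 (7);
     vstrhq (buf16, acc);   // stores four halfwords, each equal to 7
*/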
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (int16_t * __addr, int32x4_t __value)
{
  __arm_vstrhq_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (int16_t * __addr, int16x8_t __value)
{
  __arm_vstrhq_s16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (uint16_t * __addr, uint32x4_t __value)
{
  __arm_vstrhq_u32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (uint16_t * __addr, uint16x8_t __value)
{
  __arm_vstrhq_u16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_s16 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_u32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_u16 (__addr, __value, __p);
}

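/* Polymorphic contiguous word stores: vstrwq stores four 32-bit elements at
   __addr, and vstrwq_p is the predicated form that stores only the lanes
   selected by __p.  */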
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq (int32_t * __addr, int32x4_t __value)
{
  __arm_vstrwq_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq (uint32_t * __addr, uint32x4_t __value)
{
  __arm_vstrwq_u32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_p_u32 (__addr, __value, __p);
}

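/* Doubleword and word scatter stores.  For the _scatter_base forms, __addr is
   itself a vector of base addresses and __offset is an immediate byte offset
   added to every base (the architecture restricts it to a small multiple of
   the element size; see the Arm MVE intrinsics reference).  The
   _scatter_offset and _scatter_shifted_offset forms take a scalar base
   pointer plus a vector of per-lane offsets, unscaled and scaled by the
   element size respectively, and the _p variants are predicated.  These
   wrappers only dispatch on argument types.  */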
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_p_s64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_p_u64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_base_s64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_base_u64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_offset_p_s64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_offset_p_u64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_offset_s64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_offset_u64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_shifted_offset_p_s64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_shifted_offset_p_u64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_shifted_offset_s64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_shifted_offset_u64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_offset_u32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_shifted_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value);
}

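/* Polymorphic element-wise addition.  Each vaddq overload below forwards to
   the type-suffixed intrinsic matching its argument types.  Illustrative
   sketch (an assumption, not part of the original header), given two
   initialised uint32x4_t values a and b:

     uint32x4_t sum = vaddq (a, b);   // per-lane a[i] + b[i]
*/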
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vaddq_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vaddq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vaddq_s32 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vaddq_u8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vaddq_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vaddq_u32 (__a, __b);
}

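/* Vector increment/decrement "dup" generators.  vidupq/vddupq create a vector
   whose first lane is __a and whose later lanes increment/decrement by the
   immediate __imm (limited by the architecture to 1, 2, 4 or 8); the wrapping
   forms viwdupq/vdwdupq additionally wrap modulo __b.  Overloads taking a
   uint32_t * use the writeback (_wb) intrinsics, which also update the scalar
   through the pointer so a loop can resume where it left off.  The _m forms
   are merge-predicated: lanes with a clear predicate bit keep the value from
   __inactive.  Illustrative sketch (an assumption, not part of the original
   header), with `base' a user-supplied uint32_t counter:

     uint16x8_t idx = vidupq_u16 (base, 1);   // base, base+1, ..., base+7

   See the Arm MVE intrinsics reference for the exact wrap semantics.  */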
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u8 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u32 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u16 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u8 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u32 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u16 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u8 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u16 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u32 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u32 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u8 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u16 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u32 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u32 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u16 (__a, __b, __imm);
}

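/* Writeback scatter-base stores.  These forms take a pointer to the vector of
   base addresses; roughly, besides performing the scatter they write the
   advanced base vector back through __addr, which is what the _wb suffix of
   the underlying intrinsics refers to.  The _p forms are predicated.  */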
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_s64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_u64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_s64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_u64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_s32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_u32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_s32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
}

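/* _x ("don't care") predicated variants of the dup generators.  For
   predicate-true lanes these behave like the unpredicated forms above, while
   the remaining lanes are left undefined; the wrappers dispatch to the
   _x_n_/_x_wb_ intrinsics according to whether the start value is passed by
   value or by pointer.  */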
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u32 (__a, __b, __imm, __p);
}

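/* Predicated-with-don't-care element-wise operations: per-lane minimum,
   maximum and absolute difference follow.  Illustrative sketch (an
   assumption, not part of the original header), given two initialised
   int16x8_t values a and b:

     mve_pred16_t p = vctp16q (5);        // first five lanes active
     int16x8_t lo = vminq_x (a, b, p);    // inactive lanes undefined
*/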
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s32 (__a, __p);
}

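/* Predicated-with-don't-care addition.  The overloads below cover both
   vector+vector and vector+scalar (_n) forms; the scalar is broadcast to
   every active lane.  */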
30689 __extension__ extern __inline int8x16_t
30690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int8x16_t __a,int8x16_t __b,mve_pred16_t __p)30691 __arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
30692 {
30693  return __arm_vaddq_x_s8 (__a, __b, __p);
30694 }
30695 
30696 __extension__ extern __inline int16x8_t
30697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int16x8_t __a,int16x8_t __b,mve_pred16_t __p)30698 __arm_vaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
30699 {
30700  return __arm_vaddq_x_s16 (__a, __b, __p);
30701 }
30702 
30703 __extension__ extern __inline int32x4_t
30704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int32x4_t __a,int32x4_t __b,mve_pred16_t __p)30705 __arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
30706 {
30707  return __arm_vaddq_x_s32 (__a, __b, __p);
30708 }
30709 
30710 __extension__ extern __inline int8x16_t
30711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int8x16_t __a,int8_t __b,mve_pred16_t __p)30712 __arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
30713 {
30714  return __arm_vaddq_x_n_s8 (__a, __b, __p);
30715 }
30716 
30717 __extension__ extern __inline int16x8_t
30718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int16x8_t __a,int16_t __b,mve_pred16_t __p)30719 __arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
30720 {
30721  return __arm_vaddq_x_n_s16 (__a, __b, __p);
30722 }
30723 
30724 __extension__ extern __inline int32x4_t
30725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(int32x4_t __a,int32_t __b,mve_pred16_t __p)30726 __arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
30727 {
30728  return __arm_vaddq_x_n_s32 (__a, __b, __p);
30729 }
30730 
30731 __extension__ extern __inline uint8x16_t
30732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(uint8x16_t __a,uint8x16_t __b,mve_pred16_t __p)30733 __arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30734 {
30735  return __arm_vaddq_x_u8 (__a, __b, __p);
30736 }
30737 
30738 __extension__ extern __inline uint16x8_t
30739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(uint16x8_t __a,uint16x8_t __b,mve_pred16_t __p)30740 __arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
30741 {
30742  return __arm_vaddq_x_u16 (__a, __b, __p);
30743 }
30744 
30745 __extension__ extern __inline uint32x4_t
30746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(uint32x4_t __a,uint32x4_t __b,mve_pred16_t __p)30747 __arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
30748 {
30749  return __arm_vaddq_x_u32 (__a, __b, __p);
30750 }
30751 
30752 __extension__ extern __inline uint8x16_t
30753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x(uint8x16_t __a,uint8_t __b,mve_pred16_t __p)30754 __arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
30755 {
30756  return __arm_vaddq_x_n_u8 (__a, __b, __p);
30757 }
30758 
30759 __extension__ extern __inline uint16x8_t
30760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30761 __arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
30762 {
30763  return __arm_vaddq_x_n_u16 (__a, __b, __p);
30764 }
30765 
30766 __extension__ extern __inline uint32x4_t
30767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30768 __arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
30769 {
30770  return __arm_vaddq_x_n_u32 (__a, __b, __p);
30771 }
30772 
30773 __extension__ extern __inline int8x16_t
30774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30775 __arm_vclsq_x (int8x16_t __a, mve_pred16_t __p)
30776 {
30777  return __arm_vclsq_x_s8 (__a, __p);
30778 }
30779 
30780 __extension__ extern __inline int16x8_t
30781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30782 __arm_vclsq_x (int16x8_t __a, mve_pred16_t __p)
30783 {
30784  return __arm_vclsq_x_s16 (__a, __p);
30785 }
30786 
30787 __extension__ extern __inline int32x4_t
30788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30789 __arm_vclsq_x (int32x4_t __a, mve_pred16_t __p)
30790 {
30791  return __arm_vclsq_x_s32 (__a, __p);
30792 }
30793 
30794 __extension__ extern __inline int8x16_t
30795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30796 __arm_vclzq_x (int8x16_t __a, mve_pred16_t __p)
30797 {
30798  return __arm_vclzq_x_s8 (__a, __p);
30799 }
30800 
30801 __extension__ extern __inline int16x8_t
30802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30803 __arm_vclzq_x (int16x8_t __a, mve_pred16_t __p)
30804 {
30805  return __arm_vclzq_x_s16 (__a, __p);
30806 }
30807 
30808 __extension__ extern __inline int32x4_t
30809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30810 __arm_vclzq_x (int32x4_t __a, mve_pred16_t __p)
30811 {
30812  return __arm_vclzq_x_s32 (__a, __p);
30813 }
30814 
30815 __extension__ extern __inline uint8x16_t
30816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30817 __arm_vclzq_x (uint8x16_t __a, mve_pred16_t __p)
30818 {
30819  return __arm_vclzq_x_u8 (__a, __p);
30820 }
30821 
30822 __extension__ extern __inline uint16x8_t
30823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30824 __arm_vclzq_x (uint16x8_t __a, mve_pred16_t __p)
30825 {
30826  return __arm_vclzq_x_u16 (__a, __p);
30827 }
30828 
30829 __extension__ extern __inline uint32x4_t
30830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30831 __arm_vclzq_x (uint32x4_t __a, mve_pred16_t __p)
30832 {
30833  return __arm_vclzq_x_u32 (__a, __p);
30834 }
30835 
30836 __extension__ extern __inline int8x16_t
30837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30838 __arm_vnegq_x (int8x16_t __a, mve_pred16_t __p)
30839 {
30840  return __arm_vnegq_x_s8 (__a, __p);
30841 }
30842 
30843 __extension__ extern __inline int16x8_t
30844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30845 __arm_vnegq_x (int16x8_t __a, mve_pred16_t __p)
30846 {
30847  return __arm_vnegq_x_s16 (__a, __p);
30848 }
30849 
30850 __extension__ extern __inline int32x4_t
30851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30852 __arm_vnegq_x (int32x4_t __a, mve_pred16_t __p)
30853 {
30854  return __arm_vnegq_x_s32 (__a, __p);
30855 }
30856 
30857 __extension__ extern __inline int8x16_t
30858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30859 __arm_vmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
30860 {
30861  return __arm_vmulhq_x_s8 (__a, __b, __p);
30862 }
30863 
30864 __extension__ extern __inline int16x8_t
30865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30866 __arm_vmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
30867 {
30868  return __arm_vmulhq_x_s16 (__a, __b, __p);
30869 }
30870 
30871 __extension__ extern __inline int32x4_t
30872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30873 __arm_vmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
30874 {
30875  return __arm_vmulhq_x_s32 (__a, __b, __p);
30876 }
30877 
30878 __extension__ extern __inline uint8x16_t
30879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30880 __arm_vmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30881 {
30882  return __arm_vmulhq_x_u8 (__a, __b, __p);
30883 }
30884 
30885 __extension__ extern __inline uint16x8_t
30886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30887 __arm_vmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
30888 {
30889  return __arm_vmulhq_x_u16 (__a, __b, __p);
30890 }
30891 
30892 __extension__ extern __inline uint32x4_t
30893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30894 __arm_vmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
30895 {
30896  return __arm_vmulhq_x_u32 (__a, __b, __p);
30897 }
30898 
30899 __extension__ extern __inline uint16x8_t
30900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30901 __arm_vmullbq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30902 {
30903  return __arm_vmullbq_poly_x_p8 (__a, __b, __p);
30904 }
30905 
30906 __extension__ extern __inline uint32x4_t
30907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30908 __arm_vmullbq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
30909 {
30910  return __arm_vmullbq_poly_x_p16 (__a, __b, __p);
30911 }
30912 
30913 __extension__ extern __inline int16x8_t
30914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30915 __arm_vmullbq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
30916 {
30917  return __arm_vmullbq_int_x_s8 (__a, __b, __p);
30918 }
30919 
30920 __extension__ extern __inline int32x4_t
30921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30922 __arm_vmullbq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
30923 {
30924  return __arm_vmullbq_int_x_s16 (__a, __b, __p);
30925 }
30926 
30927 __extension__ extern __inline int64x2_t
30928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30929 __arm_vmullbq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
30930 {
30931  return __arm_vmullbq_int_x_s32 (__a, __b, __p);
30932 }
30933 
30934 __extension__ extern __inline uint16x8_t
30935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30936 __arm_vmullbq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30937 {
30938  return __arm_vmullbq_int_x_u8 (__a, __b, __p);
30939 }
30940 
30941 __extension__ extern __inline uint32x4_t
30942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30943 __arm_vmullbq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
30944 {
30945  return __arm_vmullbq_int_x_u16 (__a, __b, __p);
30946 }
30947 
30948 __extension__ extern __inline uint64x2_t
30949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30950 __arm_vmullbq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
30951 {
30952  return __arm_vmullbq_int_x_u32 (__a, __b, __p);
30953 }
30954 
30955 __extension__ extern __inline uint16x8_t
30956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30957 __arm_vmulltq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30958 {
30959  return __arm_vmulltq_poly_x_p8 (__a, __b, __p);
30960 }
30961 
30962 __extension__ extern __inline uint32x4_t
30963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30964 __arm_vmulltq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
30965 {
30966  return __arm_vmulltq_poly_x_p16 (__a, __b, __p);
30967 }
30968 
30969 __extension__ extern __inline int16x8_t
30970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30971 __arm_vmulltq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
30972 {
30973  return __arm_vmulltq_int_x_s8 (__a, __b, __p);
30974 }
30975 
30976 __extension__ extern __inline int32x4_t
30977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30978 __arm_vmulltq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
30979 {
30980  return __arm_vmulltq_int_x_s16 (__a, __b, __p);
30981 }
30982 
30983 __extension__ extern __inline int64x2_t
30984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30985 __arm_vmulltq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
30986 {
30987  return __arm_vmulltq_int_x_s32 (__a, __b, __p);
30988 }
30989 
30990 __extension__ extern __inline uint16x8_t
30991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30992 __arm_vmulltq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
30993 {
30994  return __arm_vmulltq_int_x_u8 (__a, __b, __p);
30995 }
30996 
30997 __extension__ extern __inline uint32x4_t
30998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30999 __arm_vmulltq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31000 {
31001  return __arm_vmulltq_int_x_u16 (__a, __b, __p);
31002 }
31003 
31004 __extension__ extern __inline uint64x2_t
31005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31006 __arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31007 {
31008  return __arm_vmulltq_int_x_u32 (__a, __b, __p);
31009 }
31010 
31011 __extension__ extern __inline int8x16_t
31012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31013 __arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31014 {
31015  return __arm_vmulq_x_s8 (__a, __b, __p);
31016 }
31017 
31018 __extension__ extern __inline int16x8_t
31019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31020 __arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31021 {
31022  return __arm_vmulq_x_s16 (__a, __b, __p);
31023 }
31024 
31025 __extension__ extern __inline int32x4_t
31026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31027 __arm_vmulq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31028 {
31029  return __arm_vmulq_x_s32 (__a, __b, __p);
31030 }
31031 
31032 __extension__ extern __inline int8x16_t
31033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31034 __arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
31035 {
31036  return __arm_vmulq_x_n_s8 (__a, __b, __p);
31037 }
31038 
31039 __extension__ extern __inline int16x8_t
31040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31041 __arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
31042 {
31043  return __arm_vmulq_x_n_s16 (__a, __b, __p);
31044 }
31045 
31046 __extension__ extern __inline int32x4_t
31047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31048 __arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
31049 {
31050  return __arm_vmulq_x_n_s32 (__a, __b, __p);
31051 }
31052 
31053 __extension__ extern __inline uint8x16_t
31054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31055 __arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31056 {
31057  return __arm_vmulq_x_u8 (__a, __b, __p);
31058 }
31059 
31060 __extension__ extern __inline uint16x8_t
31061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31062 __arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31063 {
31064  return __arm_vmulq_x_u16 (__a, __b, __p);
31065 }
31066 
31067 __extension__ extern __inline uint32x4_t
31068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31069 __arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31070 {
31071  return __arm_vmulq_x_u32 (__a, __b, __p);
31072 }
31073 
31074 __extension__ extern __inline uint8x16_t
31075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31076 __arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
31077 {
31078  return __arm_vmulq_x_n_u8 (__a, __b, __p);
31079 }
31080 
31081 __extension__ extern __inline uint16x8_t
31082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31083 __arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
31084 {
31085  return __arm_vmulq_x_n_u16 (__a, __b, __p);
31086 }
31087 
31088 __extension__ extern __inline uint32x4_t
31089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31090 __arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
31091 {
31092  return __arm_vmulq_x_n_u32 (__a, __b, __p);
31093 }
31094 
31095 __extension__ extern __inline int8x16_t
31096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31097 __arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31098 {
31099  return __arm_vsubq_x_s8 (__a, __b, __p);
31100 }
31101 
31102 __extension__ extern __inline int16x8_t
31103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31104 __arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31105 {
31106  return __arm_vsubq_x_s16 (__a, __b, __p);
31107 }
31108 
31109 __extension__ extern __inline int32x4_t
31110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31111 __arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31112 {
31113  return __arm_vsubq_x_s32 (__a, __b, __p);
31114 }
31115 
31116 __extension__ extern __inline int8x16_t
31117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31118 __arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
31119 {
31120  return __arm_vsubq_x_n_s8 (__a, __b, __p);
31121 }
31122 
31123 __extension__ extern __inline int16x8_t
31124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31125 __arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
31126 {
31127  return __arm_vsubq_x_n_s16 (__a, __b, __p);
31128 }
31129 
31130 __extension__ extern __inline int32x4_t
31131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31132 __arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
31133 {
31134  return __arm_vsubq_x_n_s32 (__a, __b, __p);
31135 }
31136 
31137 __extension__ extern __inline uint8x16_t
31138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31139 __arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31140 {
31141  return __arm_vsubq_x_u8 (__a, __b, __p);
31142 }
31143 
31144 __extension__ extern __inline uint16x8_t
31145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31146 __arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31147 {
31148  return __arm_vsubq_x_u16 (__a, __b, __p);
31149 }
31150 
31151 __extension__ extern __inline uint32x4_t
31152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31153 __arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31154 {
31155  return __arm_vsubq_x_u32 (__a, __b, __p);
31156 }
31157 
31158 __extension__ extern __inline uint8x16_t
31159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31160 __arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
31161 {
31162  return __arm_vsubq_x_n_u8 (__a, __b, __p);
31163 }
31164 
31165 __extension__ extern __inline uint16x8_t
31166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31167 __arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
31168 {
31169  return __arm_vsubq_x_n_u16 (__a, __b, __p);
31170 }
31171 
31172 __extension__ extern __inline uint32x4_t
31173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31174 __arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
31175 {
31176  return __arm_vsubq_x_n_u32 (__a, __b, __p);
31177 }
31178 
31179 __extension__ extern __inline int8x16_t
31180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31181 __arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31182 {
31183  return __arm_vcaddq_rot90_x_s8 (__a, __b, __p);
31184 }
31185 
31186 __extension__ extern __inline int16x8_t
31187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31188 __arm_vcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31189 {
31190  return __arm_vcaddq_rot90_x_s16 (__a, __b, __p);
31191 }
31192 
31193 __extension__ extern __inline int32x4_t
31194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31195 __arm_vcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31196 {
31197  return __arm_vcaddq_rot90_x_s32 (__a, __b, __p);
31198 }
31199 
31200 __extension__ extern __inline uint8x16_t
31201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31202 __arm_vcaddq_rot90_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31203 {
31204  return __arm_vcaddq_rot90_x_u8 (__a, __b, __p);
31205 }
31206 
31207 __extension__ extern __inline uint16x8_t
31208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31209 __arm_vcaddq_rot90_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31210 {
31211  return __arm_vcaddq_rot90_x_u16 (__a, __b, __p);
31212 }
31213 
31214 __extension__ extern __inline uint32x4_t
31215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31216 __arm_vcaddq_rot90_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31217 {
31218  return __arm_vcaddq_rot90_x_u32 (__a, __b, __p);
31219 }
31220 
31221 __extension__ extern __inline int8x16_t
31222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31223 __arm_vcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31224 {
31225  return __arm_vcaddq_rot270_x_s8 (__a, __b, __p);
31226 }
31227 
31228 __extension__ extern __inline int16x8_t
31229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31230 __arm_vcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31231 {
31232  return __arm_vcaddq_rot270_x_s16 (__a, __b, __p);
31233 }
31234 
31235 __extension__ extern __inline int32x4_t
31236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31237 __arm_vcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31238 {
31239  return __arm_vcaddq_rot270_x_s32 (__a, __b, __p);
31240 }
31241 
31242 __extension__ extern __inline uint8x16_t
31243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31244 __arm_vcaddq_rot270_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31245 {
31246  return __arm_vcaddq_rot270_x_u8 (__a, __b, __p);
31247 }
31248 
31249 __extension__ extern __inline uint16x8_t
31250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31251 __arm_vcaddq_rot270_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31252 {
31253  return __arm_vcaddq_rot270_x_u16 (__a, __b, __p);
31254 }
31255 
31256 __extension__ extern __inline uint32x4_t
31257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31258 __arm_vcaddq_rot270_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31259 {
31260  return __arm_vcaddq_rot270_x_u32 (__a, __b, __p);
31261 }
31262 
31263 __extension__ extern __inline int8x16_t
31264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31265 __arm_vhaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
31266 {
31267  return __arm_vhaddq_x_n_s8 (__a, __b, __p);
31268 }
31269 
31270 __extension__ extern __inline int16x8_t
31271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31272 __arm_vhaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
31273 {
31274  return __arm_vhaddq_x_n_s16 (__a, __b, __p);
31275 }
31276 
31277 __extension__ extern __inline int32x4_t
31278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31279 __arm_vhaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
31280 {
31281  return __arm_vhaddq_x_n_s32 (__a, __b, __p);
31282 }
31283 
31284 __extension__ extern __inline uint8x16_t
31285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31286 __arm_vhaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
31287 {
31288  return __arm_vhaddq_x_n_u8 (__a, __b, __p);
31289 }
31290 
31291 __extension__ extern __inline uint16x8_t
31292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31293 __arm_vhaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
31294 {
31295  return __arm_vhaddq_x_n_u16 (__a, __b, __p);
31296 }
31297 
31298 __extension__ extern __inline uint32x4_t
31299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31300 __arm_vhaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
31301 {
31302  return __arm_vhaddq_x_n_u32 (__a, __b, __p);
31303 }
31304 
31305 __extension__ extern __inline int8x16_t
31306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31307 __arm_vhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31308 {
31309  return __arm_vhaddq_x_s8 (__a, __b, __p);
31310 }
31311 
31312 __extension__ extern __inline int16x8_t
31313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31314 __arm_vhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31315 {
31316  return __arm_vhaddq_x_s16 (__a, __b, __p);
31317 }
31318 
31319 __extension__ extern __inline int32x4_t
31320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31321 __arm_vhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31322 {
31323  return __arm_vhaddq_x_s32 (__a, __b, __p);
31324 }
31325 
31326 __extension__ extern __inline uint8x16_t
31327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31328 __arm_vhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31329 {
31330  return __arm_vhaddq_x_u8 (__a, __b, __p);
31331 }
31332 
31333 __extension__ extern __inline uint16x8_t
31334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31335 __arm_vhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31336 {
31337  return __arm_vhaddq_x_u16 (__a, __b, __p);
31338 }
31339 
31340 __extension__ extern __inline uint32x4_t
31341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31342 __arm_vhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31343 {
31344  return __arm_vhaddq_x_u32 (__a, __b, __p);
31345 }
31346 
31347 __extension__ extern __inline int8x16_t
31348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31349 __arm_vhcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31350 {
31351  return __arm_vhcaddq_rot90_x_s8 (__a, __b, __p);
31352 }
31353 
31354 __extension__ extern __inline int16x8_t
31355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31356 __arm_vhcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31357 {
31358  return __arm_vhcaddq_rot90_x_s16 (__a, __b, __p);
31359 }
31360 
31361 __extension__ extern __inline int32x4_t
31362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31363 __arm_vhcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31364 {
31365  return __arm_vhcaddq_rot90_x_s32 (__a, __b, __p);
31366 }
31367 
31368 __extension__ extern __inline int8x16_t
31369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31370 __arm_vhcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31371 {
31372  return __arm_vhcaddq_rot270_x_s8 (__a, __b, __p);
31373 }
31374 
31375 __extension__ extern __inline int16x8_t
31376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31377 __arm_vhcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31378 {
31379  return __arm_vhcaddq_rot270_x_s16 (__a, __b, __p);
31380 }
31381 
31382 __extension__ extern __inline int32x4_t
31383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31384 __arm_vhcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31385 {
31386  return __arm_vhcaddq_rot270_x_s32 (__a, __b, __p);
31387 }
31388 
31389 __extension__ extern __inline int8x16_t
31390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31391 __arm_vhsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
31392 {
31393  return __arm_vhsubq_x_n_s8 (__a, __b, __p);
31394 }
31395 
31396 __extension__ extern __inline int16x8_t
31397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31398 __arm_vhsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
31399 {
31400  return __arm_vhsubq_x_n_s16 (__a, __b, __p);
31401 }
31402 
31403 __extension__ extern __inline int32x4_t
31404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31405 __arm_vhsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
31406 {
31407  return __arm_vhsubq_x_n_s32 (__a, __b, __p);
31408 }
31409 
31410 __extension__ extern __inline uint8x16_t
31411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31412 __arm_vhsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
31413 {
31414  return __arm_vhsubq_x_n_u8 (__a, __b, __p);
31415 }
31416 
31417 __extension__ extern __inline uint16x8_t
31418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31419 __arm_vhsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
31420 {
31421  return __arm_vhsubq_x_n_u16 (__a, __b, __p);
31422 }
31423 
31424 __extension__ extern __inline uint32x4_t
31425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31426 __arm_vhsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
31427 {
31428  return __arm_vhsubq_x_n_u32 (__a, __b, __p);
31429 }
31430 
31431 __extension__ extern __inline int8x16_t
31432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31433 __arm_vhsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31434 {
31435  return __arm_vhsubq_x_s8 (__a, __b, __p);
31436 }
31437 
31438 __extension__ extern __inline int16x8_t
31439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31440 __arm_vhsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31441 {
31442  return __arm_vhsubq_x_s16 (__a, __b, __p);
31443 }
31444 
31445 __extension__ extern __inline int32x4_t
31446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31447 __arm_vhsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31448 {
31449  return __arm_vhsubq_x_s32 (__a, __b, __p);
31450 }
31451 
31452 __extension__ extern __inline uint8x16_t
31453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31454 __arm_vhsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31455 {
31456  return __arm_vhsubq_x_u8 (__a, __b, __p);
31457 }
31458 
31459 __extension__ extern __inline uint16x8_t
31460 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31461 __arm_vhsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31462 {
31463  return __arm_vhsubq_x_u16 (__a, __b, __p);
31464 }
31465 
31466 __extension__ extern __inline uint32x4_t
31467 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31468 __arm_vhsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31469 {
31470  return __arm_vhsubq_x_u32 (__a, __b, __p);
31471 }
31472 
31473 __extension__ extern __inline int8x16_t
31474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31475 __arm_vrhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31476 {
31477  return __arm_vrhaddq_x_s8 (__a, __b, __p);
31478 }
31479 
31480 __extension__ extern __inline int16x8_t
31481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31482 __arm_vrhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31483 {
31484  return __arm_vrhaddq_x_s16 (__a, __b, __p);
31485 }
31486 
31487 __extension__ extern __inline int32x4_t
31488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31489 __arm_vrhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31490 {
31491  return __arm_vrhaddq_x_s32 (__a, __b, __p);
31492 }
31493 
31494 __extension__ extern __inline uint8x16_t
31495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31496 __arm_vrhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31497 {
31498  return __arm_vrhaddq_x_u8 (__a, __b, __p);
31499 }
31500 
31501 __extension__ extern __inline uint16x8_t
31502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31503 __arm_vrhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31504 {
31505  return __arm_vrhaddq_x_u16 (__a, __b, __p);
31506 }
31507 
31508 __extension__ extern __inline uint32x4_t
31509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31510 __arm_vrhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31511 {
31512  return __arm_vrhaddq_x_u32 (__a, __b, __p);
31513 }
31514 
31515 __extension__ extern __inline int8x16_t
31516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31517 __arm_vrmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31518 {
31519  return __arm_vrmulhq_x_s8 (__a, __b, __p);
31520 }
31521 
31522 __extension__ extern __inline int16x8_t
31523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31524 __arm_vrmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31525 {
31526  return __arm_vrmulhq_x_s16 (__a, __b, __p);
31527 }
31528 
31529 __extension__ extern __inline int32x4_t
31530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31531 __arm_vrmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31532 {
31533  return __arm_vrmulhq_x_s32 (__a, __b, __p);
31534 }
31535 
31536 __extension__ extern __inline uint8x16_t
31537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31538 __arm_vrmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31539 {
31540  return __arm_vrmulhq_x_u8 (__a, __b, __p);
31541 }
31542 
31543 __extension__ extern __inline uint16x8_t
31544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31545 __arm_vrmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31546 {
31547  return __arm_vrmulhq_x_u16 (__a, __b, __p);
31548 }
31549 
31550 __extension__ extern __inline uint32x4_t
31551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31552 __arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31553 {
31554  return __arm_vrmulhq_x_u32 (__a, __b, __p);
31555 }
31556 
31557 __extension__ extern __inline int8x16_t
31558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31559 __arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31560 {
31561  return __arm_vandq_x_s8 (__a, __b, __p);
31562 }
31563 
31564 __extension__ extern __inline int16x8_t
31565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31566 __arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31567 {
31568  return __arm_vandq_x_s16 (__a, __b, __p);
31569 }
31570 
31571 __extension__ extern __inline int32x4_t
31572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31573 __arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31574 {
31575  return __arm_vandq_x_s32 (__a, __b, __p);
31576 }
31577 
31578 __extension__ extern __inline uint8x16_t
31579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31580 __arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31581 {
31582  return __arm_vandq_x_u8 (__a, __b, __p);
31583 }
31584 
31585 __extension__ extern __inline uint16x8_t
31586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31587 __arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31588 {
31589  return __arm_vandq_x_u16 (__a, __b, __p);
31590 }
31591 
31592 __extension__ extern __inline uint32x4_t
31593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31594 __arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31595 {
31596  return __arm_vandq_x_u32 (__a, __b, __p);
31597 }
31598 
31599 __extension__ extern __inline int8x16_t
31600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31601 __arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31602 {
31603  return __arm_vbicq_x_s8 (__a, __b, __p);
31604 }
31605 
31606 __extension__ extern __inline int16x8_t
31607 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31608 __arm_vbicq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31609 {
31610  return __arm_vbicq_x_s16 (__a, __b, __p);
31611 }
31612 
31613 __extension__ extern __inline int32x4_t
31614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31615 __arm_vbicq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31616 {
31617  return __arm_vbicq_x_s32 (__a, __b, __p);
31618 }
31619 
31620 __extension__ extern __inline uint8x16_t
31621 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31622 __arm_vbicq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31623 {
31624  return __arm_vbicq_x_u8 (__a, __b, __p);
31625 }
31626 
31627 __extension__ extern __inline uint16x8_t
31628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31629 __arm_vbicq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31630 {
31631  return __arm_vbicq_x_u16 (__a, __b, __p);
31632 }
31633 
31634 __extension__ extern __inline uint32x4_t
31635 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31636 __arm_vbicq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31637 {
31638  return __arm_vbicq_x_u32 (__a, __b, __p);
31639 }
31640 
31641 __extension__ extern __inline int8x16_t
31642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31643 __arm_vbrsrq_x (int8x16_t __a, int32_t __b, mve_pred16_t __p)
31644 {
31645  return __arm_vbrsrq_x_n_s8 (__a, __b, __p);
31646 }
31647 
31648 __extension__ extern __inline int16x8_t
31649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31650 __arm_vbrsrq_x (int16x8_t __a, int32_t __b, mve_pred16_t __p)
31651 {
31652  return __arm_vbrsrq_x_n_s16 (__a, __b, __p);
31653 }
31654 
31655 __extension__ extern __inline int32x4_t
31656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31657 __arm_vbrsrq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
31658 {
31659  return __arm_vbrsrq_x_n_s32 (__a, __b, __p);
31660 }
31661 
31662 __extension__ extern __inline uint8x16_t
31663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31664 __arm_vbrsrq_x (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
31665 {
31666  return __arm_vbrsrq_x_n_u8 (__a, __b, __p);
31667 }
31668 
31669 __extension__ extern __inline uint16x8_t
31670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31671 __arm_vbrsrq_x (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
31672 {
31673  return __arm_vbrsrq_x_n_u16 (__a, __b, __p);
31674 }
31675 
31676 __extension__ extern __inline uint32x4_t
31677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31678 __arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
31679 {
31680  return __arm_vbrsrq_x_n_u32 (__a, __b, __p);
31681 }
31682 
31683 __extension__ extern __inline int8x16_t
31684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31685 __arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31686 {
31687  return __arm_veorq_x_s8 (__a, __b, __p);
31688 }
31689 
31690 __extension__ extern __inline int16x8_t
31691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31692 __arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31693 {
31694  return __arm_veorq_x_s16 (__a, __b, __p);
31695 }
31696 
31697 __extension__ extern __inline int32x4_t
31698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31699 __arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31700 {
31701  return __arm_veorq_x_s32 (__a, __b, __p);
31702 }
31703 
31704 __extension__ extern __inline uint8x16_t
31705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31706 __arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31707 {
31708  return __arm_veorq_x_u8 (__a, __b, __p);
31709 }
31710 
31711 __extension__ extern __inline uint16x8_t
31712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31713 __arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31714 {
31715  return __arm_veorq_x_u16 (__a, __b, __p);
31716 }
31717 
31718 __extension__ extern __inline uint32x4_t
31719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31720 __arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31721 {
31722  return __arm_veorq_x_u32 (__a, __b, __p);
31723 }
31724 
31725 __extension__ extern __inline int16x8_t
31726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31727 __arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p)
31728 {
31729  return __arm_vmovlbq_x_s8 (__a, __p);
31730 }
31731 
31732 __extension__ extern __inline int32x4_t
31733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31734 __arm_vmovlbq_x (int16x8_t __a, mve_pred16_t __p)
31735 {
31736  return __arm_vmovlbq_x_s16 (__a, __p);
31737 }
31738 
31739 __extension__ extern __inline uint16x8_t
31740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31741 __arm_vmovlbq_x (uint8x16_t __a, mve_pred16_t __p)
31742 {
31743  return __arm_vmovlbq_x_u8 (__a, __p);
31744 }
31745 
31746 __extension__ extern __inline uint32x4_t
31747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31748 __arm_vmovlbq_x (uint16x8_t __a, mve_pred16_t __p)
31749 {
31750  return __arm_vmovlbq_x_u16 (__a, __p);
31751 }
31752 
31753 __extension__ extern __inline int16x8_t
31754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31755 __arm_vmovltq_x (int8x16_t __a, mve_pred16_t __p)
31756 {
31757  return __arm_vmovltq_x_s8 (__a, __p);
31758 }
31759 
31760 __extension__ extern __inline int32x4_t
31761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31762 __arm_vmovltq_x (int16x8_t __a, mve_pred16_t __p)
31763 {
31764  return __arm_vmovltq_x_s16 (__a, __p);
31765 }
31766 
31767 __extension__ extern __inline uint16x8_t
31768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31769 __arm_vmovltq_x (uint8x16_t __a, mve_pred16_t __p)
31770 {
31771  return __arm_vmovltq_x_u8 (__a, __p);
31772 }
31773 
31774 __extension__ extern __inline uint32x4_t
31775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31776 __arm_vmovltq_x (uint16x8_t __a, mve_pred16_t __p)
31777 {
31778  return __arm_vmovltq_x_u16 (__a, __p);
31779 }
31780 
31781 __extension__ extern __inline int8x16_t
31782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31783 __arm_vmvnq_x (int8x16_t __a, mve_pred16_t __p)
31784 {
31785  return __arm_vmvnq_x_s8 (__a, __p);
31786 }
31787 
31788 __extension__ extern __inline int16x8_t
31789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31790 __arm_vmvnq_x (int16x8_t __a, mve_pred16_t __p)
31791 {
31792  return __arm_vmvnq_x_s16 (__a, __p);
31793 }
31794 
31795 __extension__ extern __inline int32x4_t
31796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31797 __arm_vmvnq_x (int32x4_t __a, mve_pred16_t __p)
31798 {
31799  return __arm_vmvnq_x_s32 (__a, __p);
31800 }
31801 
31802 __extension__ extern __inline uint8x16_t
31803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31804 __arm_vmvnq_x (uint8x16_t __a, mve_pred16_t __p)
31805 {
31806  return __arm_vmvnq_x_u8 (__a, __p);
31807 }
31808 
31809 __extension__ extern __inline uint16x8_t
31810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31811 __arm_vmvnq_x (uint16x8_t __a, mve_pred16_t __p)
31812 {
31813  return __arm_vmvnq_x_u16 (__a, __p);
31814 }
31815 
31816 __extension__ extern __inline uint32x4_t
31817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31818 __arm_vmvnq_x (uint32x4_t __a, mve_pred16_t __p)
31819 {
31820  return __arm_vmvnq_x_u32 (__a, __p);
31821 }
31822 
31823 __extension__ extern __inline int8x16_t
31824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31825 __arm_vornq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31826 {
31827  return __arm_vornq_x_s8 (__a, __b, __p);
31828 }
31829 
31830 __extension__ extern __inline int16x8_t
31831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31832 __arm_vornq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31833 {
31834  return __arm_vornq_x_s16 (__a, __b, __p);
31835 }
31836 
31837 __extension__ extern __inline int32x4_t
31838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31839 __arm_vornq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31840 {
31841  return __arm_vornq_x_s32 (__a, __b, __p);
31842 }
31843 
31844 __extension__ extern __inline uint8x16_t
31845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31846 __arm_vornq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31847 {
31848  return __arm_vornq_x_u8 (__a, __b, __p);
31849 }
31850 
31851 __extension__ extern __inline uint16x8_t
31852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31853 __arm_vornq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31854 {
31855  return __arm_vornq_x_u16 (__a, __b, __p);
31856 }
31857 
31858 __extension__ extern __inline uint32x4_t
31859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31860 __arm_vornq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31861 {
31862  return __arm_vornq_x_u32 (__a, __b, __p);
31863 }
31864 
31865 __extension__ extern __inline int8x16_t
31866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31867 __arm_vorrq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31868 {
31869  return __arm_vorrq_x_s8 (__a, __b, __p);
31870 }
31871 
31872 __extension__ extern __inline int16x8_t
31873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31874 __arm_vorrq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
31875 {
31876  return __arm_vorrq_x_s16 (__a, __b, __p);
31877 }
31878 
31879 __extension__ extern __inline int32x4_t
31880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31881 __arm_vorrq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
31882 {
31883  return __arm_vorrq_x_s32 (__a, __b, __p);
31884 }
31885 
31886 __extension__ extern __inline uint8x16_t
31887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31888 __arm_vorrq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
31889 {
31890  return __arm_vorrq_x_u8 (__a, __b, __p);
31891 }
31892 
31893 __extension__ extern __inline uint16x8_t
31894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31895 __arm_vorrq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
31896 {
31897  return __arm_vorrq_x_u16 (__a, __b, __p);
31898 }
31899 
31900 __extension__ extern __inline uint32x4_t
31901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31902 __arm_vorrq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
31903 {
31904  return __arm_vorrq_x_u32 (__a, __b, __p);
31905 }
31906 
31907 __extension__ extern __inline int8x16_t
31908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31909 __arm_vrev16q_x (int8x16_t __a, mve_pred16_t __p)
31910 {
31911  return __arm_vrev16q_x_s8 (__a, __p);
31912 }
31913 
31914 __extension__ extern __inline uint8x16_t
31915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31916 __arm_vrev16q_x (uint8x16_t __a, mve_pred16_t __p)
31917 {
31918  return __arm_vrev16q_x_u8 (__a, __p);
31919 }
31920 
31921 __extension__ extern __inline int8x16_t
31922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31923 __arm_vrev32q_x (int8x16_t __a, mve_pred16_t __p)
31924 {
31925  return __arm_vrev32q_x_s8 (__a, __p);
31926 }
31927 
31928 __extension__ extern __inline int16x8_t
31929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31930 __arm_vrev32q_x (int16x8_t __a, mve_pred16_t __p)
31931 {
31932  return __arm_vrev32q_x_s16 (__a, __p);
31933 }
31934 
31935 __extension__ extern __inline uint8x16_t
31936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31937 __arm_vrev32q_x (uint8x16_t __a, mve_pred16_t __p)
31938 {
31939  return __arm_vrev32q_x_u8 (__a, __p);
31940 }
31941 
31942 __extension__ extern __inline uint16x8_t
31943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31944 __arm_vrev32q_x (uint16x8_t __a, mve_pred16_t __p)
31945 {
31946  return __arm_vrev32q_x_u16 (__a, __p);
31947 }
31948 
31949 __extension__ extern __inline int8x16_t
31950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31951 __arm_vrev64q_x (int8x16_t __a, mve_pred16_t __p)
31952 {
31953  return __arm_vrev64q_x_s8 (__a, __p);
31954 }
31955 
31956 __extension__ extern __inline int16x8_t
31957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31958 __arm_vrev64q_x (int16x8_t __a, mve_pred16_t __p)
31959 {
31960  return __arm_vrev64q_x_s16 (__a, __p);
31961 }
31962 
31963 __extension__ extern __inline int32x4_t
31964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31965 __arm_vrev64q_x (int32x4_t __a, mve_pred16_t __p)
31966 {
31967  return __arm_vrev64q_x_s32 (__a, __p);
31968 }
31969 
31970 __extension__ extern __inline uint8x16_t
31971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31972 __arm_vrev64q_x (uint8x16_t __a, mve_pred16_t __p)
31973 {
31974  return __arm_vrev64q_x_u8 (__a, __p);
31975 }
31976 
31977 __extension__ extern __inline uint16x8_t
31978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31979 __arm_vrev64q_x (uint16x8_t __a, mve_pred16_t __p)
31980 {
31981  return __arm_vrev64q_x_u16 (__a, __p);
31982 }
31983 
31984 __extension__ extern __inline uint32x4_t
31985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31986 __arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p)
31987 {
31988  return __arm_vrev64q_x_u32 (__a, __p);
31989 }
31990 
31991 __extension__ extern __inline int8x16_t
31992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31993 __arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
31994 {
31995  return __arm_vrshlq_x_s8 (__a, __b, __p);
31996 }
31997 
31998 __extension__ extern __inline int16x8_t
31999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32000 __arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
32001 {
32002  return __arm_vrshlq_x_s16 (__a, __b, __p);
32003 }
32004 
32005 __extension__ extern __inline int32x4_t
32006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32007 __arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
32008 {
32009  return __arm_vrshlq_x_s32 (__a, __b, __p);
32010 }
32011 
32012 __extension__ extern __inline uint8x16_t
32013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32014 __arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
32015 {
32016  return __arm_vrshlq_x_u8 (__a, __b, __p);
32017 }
32018 
32019 __extension__ extern __inline uint16x8_t
32020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32021 __arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
32022 {
32023  return __arm_vrshlq_x_u16 (__a, __b, __p);
32024 }
32025 
32026 __extension__ extern __inline uint32x4_t
32027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32028 __arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
32029 {
32030  return __arm_vrshlq_x_u32 (__a, __b, __p);
32031 }
32032 
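/* vshllbq_x and vshlltq_x widen as they shift: the bottom (even-numbered)
   or top (odd-numbered) elements are shifted left by a compile-time
   immediate and produced at twice the element width, so an int8x16_t
   input gives an int16x8_t result.  As above, these overloads only
   forward to the corresponding type-suffixed _x_n intrinsics.  */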
32033 __extension__ extern __inline int16x8_t
32034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32035 __arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
32036 {
32037  return __arm_vshllbq_x_n_s8 (__a, __imm, __p);
32038 }
32039 
32040 __extension__ extern __inline int32x4_t
32041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32042 __arm_vshllbq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
32043 {
32044  return __arm_vshllbq_x_n_s16 (__a, __imm, __p);
32045 }
32046 
32047 __extension__ extern __inline uint16x8_t
32048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32049 __arm_vshllbq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
32050 {
32051  return __arm_vshllbq_x_n_u8 (__a, __imm, __p);
32052 }
32053 
32054 __extension__ extern __inline uint32x4_t
32055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32056 __arm_vshllbq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
32057 {
32058  return __arm_vshllbq_x_n_u16 (__a, __imm, __p);
32059 }
32060 
32061 __extension__ extern __inline int16x8_t
32062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32063 __arm_vshlltq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
32064 {
32065  return __arm_vshlltq_x_n_s8 (__a, __imm, __p);
32066 }
32067 
32068 __extension__ extern __inline int32x4_t
32069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32070 __arm_vshlltq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
32071 {
32072  return __arm_vshlltq_x_n_s16 (__a, __imm, __p);
32073 }
32074 
32075 __extension__ extern __inline uint16x8_t
32076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32077 __arm_vshlltq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
32078 {
32079  return __arm_vshlltq_x_n_u8 (__a, __imm, __p);
32080 }
32081 
32082 __extension__ extern __inline uint32x4_t
32083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32084 __arm_vshlltq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
32085 {
32086  return __arm_vshlltq_x_n_u16 (__a, __imm, __p);
32087 }
32088 
32089 __extension__ extern __inline int8x16_t
32090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32091 __arm_vshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
32092 {
32093  return __arm_vshlq_x_s8 (__a, __b, __p);
32094 }
32095 
32096 __extension__ extern __inline int16x8_t
32097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32098 __arm_vshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
32099 {
32100  return __arm_vshlq_x_s16 (__a, __b, __p);
32101 }
32102 
32103 __extension__ extern __inline int32x4_t
32104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32105 __arm_vshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
32106 {
32107  return __arm_vshlq_x_s32 (__a, __b, __p);
32108 }
32109 
32110 __extension__ extern __inline uint8x16_t
32111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32112 __arm_vshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
32113 {
32114  return __arm_vshlq_x_u8 (__a, __b, __p);
32115 }
32116 
32117 __extension__ extern __inline uint16x8_t
32118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32119 __arm_vshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
32120 {
32121  return __arm_vshlq_x_u16 (__a, __b, __p);
32122 }
32123 
32124 __extension__ extern __inline uint32x4_t
32125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32126 __arm_vshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
32127 {
32128  return __arm_vshlq_x_u32 (__a, __b, __p);
32129 }
32130 
32131 __extension__ extern __inline int8x16_t
32132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32133 __arm_vshlq_x_n (int8x16_t __a, const int __imm, mve_pred16_t __p)
32134 {
32135  return __arm_vshlq_x_n_s8 (__a, __imm, __p);
32136 }
32137 
32138 __extension__ extern __inline int16x8_t
32139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32140 __arm_vshlq_x_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
32141 {
32142  return __arm_vshlq_x_n_s16 (__a, __imm, __p);
32143 }
32144 
32145 __extension__ extern __inline int32x4_t
32146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32147 __arm_vshlq_x_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
32148 {
32149  return __arm_vshlq_x_n_s32 (__a, __imm, __p);
32150 }
32151 
32152 __extension__ extern __inline uint8x16_t
32153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32154 __arm_vshlq_x_n (uint8x16_t __a, const int __imm, mve_pred16_t __p)
32155 {
32156  return __arm_vshlq_x_n_u8 (__a, __imm, __p);
32157 }
32158 
32159 __extension__ extern __inline uint16x8_t
32160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32161 __arm_vshlq_x_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
32162 {
32163  return __arm_vshlq_x_n_u16 (__a, __imm, __p);
32164 }
32165 
32166 __extension__ extern __inline uint32x4_t
32167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32168 __arm_vshlq_x_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
32169 {
32170  return __arm_vshlq_x_n_u32 (__a, __imm, __p);
32171 }
32172 
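/* vrshrq_x and vshrq_x are the predicated right shifts by an immediate:
   vshrq_x simply discards the shifted-out bits, while vrshrq_x adds the
   rounding constant (1 << (__imm - 1)) before shifting.  __imm must be a
   constant in the range 1 .. element width in bits.  */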
32173 __extension__ extern __inline int8x16_t
32174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32175 __arm_vrshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
32176 {
32177  return __arm_vrshrq_x_n_s8 (__a, __imm, __p);
32178 }
32179 
32180 __extension__ extern __inline int16x8_t
32181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32182 __arm_vrshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
32183 {
32184  return __arm_vrshrq_x_n_s16 (__a, __imm, __p);
32185 }
32186 
32187 __extension__ extern __inline int32x4_t
32188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32189 __arm_vrshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
32190 {
32191  return __arm_vrshrq_x_n_s32 (__a, __imm, __p);
32192 }
32193 
32194 __extension__ extern __inline uint8x16_t
32195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32196 __arm_vrshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
32197 {
32198  return __arm_vrshrq_x_n_u8 (__a, __imm, __p);
32199 }
32200 
32201 __extension__ extern __inline uint16x8_t
32202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32203 __arm_vrshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
32204 {
32205  return __arm_vrshrq_x_n_u16 (__a, __imm, __p);
32206 }
32207 
32208 __extension__ extern __inline uint32x4_t
32209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32210 __arm_vrshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
32211 {
32212  return __arm_vrshrq_x_n_u32 (__a, __imm, __p);
32213 }
32214 
32215 __extension__ extern __inline int8x16_t
32216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32217 __arm_vshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
32218 {
32219  return __arm_vshrq_x_n_s8 (__a, __imm, __p);
32220 }
32221 
32222 __extension__ extern __inline int16x8_t
32223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32224 __arm_vshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
32225 {
32226  return __arm_vshrq_x_n_s16 (__a, __imm, __p);
32227 }
32228 
32229 __extension__ extern __inline int32x4_t
32230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32231 __arm_vshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
32232 {
32233  return __arm_vshrq_x_n_s32 (__a, __imm, __p);
32234 }
32235 
32236 __extension__ extern __inline uint8x16_t
32237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32238 __arm_vshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
32239 {
32240  return __arm_vshrq_x_n_u8 (__a, __imm, __p);
32241 }
32242 
32243 __extension__ extern __inline uint16x8_t
32244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32245 __arm_vshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
32246 {
32247  return __arm_vshrq_x_n_u16 (__a, __imm, __p);
32248 }
32249 
32250 __extension__ extern __inline uint32x4_t
32251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32252 __arm_vshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
32253 {
32254  return __arm_vshrq_x_n_u32 (__a, __imm, __p);
32255 }
32256 
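/* vadciq/vadcq and vsbciq/vsbcq perform 32-bit add and subtract with a
   carry chained through the pointer argument, so multi-precision values
   can be built out of 32-bit lanes.  The 'i' forms initialise the carry
   themselves, the plain forms consume *__carry and write the new carry
   back; the _m variants are predicated and merge from __inactive.  A
   rough sketch of chaining two limbs (variable names are placeholders):

     unsigned __c;
     uint32x4_t __lo = vadciq (__a_lo, __b_lo, &__c);  // produces carry
     uint32x4_t __hi = vadcq (__a_hi, __b_hi, &__c);   // consumes carry
 */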
32257 __extension__ extern __inline int32x4_t
32258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32259 __arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
32260 {
32261  return __arm_vadciq_s32 (__a, __b, __carry_out);
32262 }
32263 
32264 __extension__ extern __inline uint32x4_t
32265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32266 __arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
32267 {
32268  return __arm_vadciq_u32 (__a, __b, __carry_out);
32269 }
32270 
32271 __extension__ extern __inline int32x4_t
32272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32273 __arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
32274 {
32275  return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
32276 }
32277 
32278 __extension__ extern __inline uint32x4_t
32279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32280 __arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
32281 {
32282  return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
32283 }
32284 
32285 __extension__ extern __inline int32x4_t
32286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32287 __arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
32288 {
32289  return __arm_vadcq_s32 (__a, __b, __carry);
32290 }
32291 
32292 __extension__ extern __inline uint32x4_t
32293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32294 __arm_vadcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
32295 {
32296  return __arm_vadcq_u32 (__a, __b, __carry);
32297 }
32298 
32299 __extension__ extern __inline int32x4_t
32300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32301 __arm_vadcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
32302 {
32303  return __arm_vadcq_m_s32 (__inactive, __a, __b, __carry, __p);
32304 }
32305 
32306 __extension__ extern __inline uint32x4_t
32307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32308 __arm_vadcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
32309 {
32310  return __arm_vadcq_m_u32 (__inactive, __a, __b, __carry, __p);
32311 }
32312 
32313 __extension__ extern __inline int32x4_t
32314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32315 __arm_vsbciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
32316 {
32317  return __arm_vsbciq_s32 (__a, __b, __carry_out);
32318 }
32319 
32320 __extension__ extern __inline uint32x4_t
32321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32322 __arm_vsbciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
32323 {
32324  return __arm_vsbciq_u32 (__a, __b, __carry_out);
32325 }
32326 
32327 __extension__ extern __inline int32x4_t
32328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32329 __arm_vsbciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
32330 {
32331  return __arm_vsbciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
32332 }
32333 
32334 __extension__ extern __inline uint32x4_t
32335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32336 __arm_vsbciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
32337 {
32338  return __arm_vsbciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
32339 }
32340 
32341 __extension__ extern __inline int32x4_t
32342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32343 __arm_vsbcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
32344 {
32345  return __arm_vsbcq_s32 (__a, __b, __carry);
32346 }
32347 
32348 __extension__ extern __inline uint32x4_t
32349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32350 __arm_vsbcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
32351 {
32352  return __arm_vsbcq_u32 (__a, __b, __carry);
32353 }
32354 
32355 __extension__ extern __inline int32x4_t
32356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32357 __arm_vsbcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
32358 {
32359  return __arm_vsbcq_m_s32 (__inactive, __a, __b, __carry, __p);
32360 }
32361 
32362 __extension__ extern __inline uint32x4_t
32363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32364 __arm_vsbcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
32365 {
32366  return __arm_vsbcq_m_u32 (__inactive, __a, __b, __carry, __p);
32367 }
32368 
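/* Contiguous load/store wrappers.  vst1q_p stores only the lanes enabled
   by __p, and vld1q_z loads with the disabled lanes zeroed; vst2q/vld2q
   and vld4q handle two- and four-vector structures, interleaving or
   de-interleaving the elements in memory.  A minimal sketch (__buf and
   __p are placeholders):

     uint8x16_t __v = vld1q_z (__buf, __p);
     vst1q_p (__buf, __v, __p);
 */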
32369 __extension__ extern __inline void
32370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32371 __arm_vst1q_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
32372 {
32373  __arm_vst1q_p_u8 (__addr, __value, __p);
32374 }
32375 
32376 __extension__ extern __inline void
32377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32378 __arm_vst1q_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
32379 {
32380  __arm_vst1q_p_s8 (__addr, __value, __p);
32381 }
32382 
32383 __extension__ extern __inline void
32384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32385 __arm_vst2q (int8_t * __addr, int8x16x2_t __value)
32386 {
32387  __arm_vst2q_s8 (__addr, __value);
32388 }
32389 
32390 __extension__ extern __inline void
32391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32392 __arm_vst2q (uint8_t * __addr, uint8x16x2_t __value)
32393 {
32394  __arm_vst2q_u8 (__addr, __value);
32395 }
32396 
32397 __extension__ extern __inline uint8x16_t
32398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32399 __arm_vld1q_z (uint8_t const *__base, mve_pred16_t __p)
32400 {
32401  return __arm_vld1q_z_u8 (__base, __p);
32402 }
32403 
32404 __extension__ extern __inline int8x16_t
32405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32406 __arm_vld1q_z (int8_t const *__base, mve_pred16_t __p)
32407 {
32408  return __arm_vld1q_z_s8 (__base, __p);
32409 }
32410 
32411 __extension__ extern __inline int8x16x2_t
32412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32413 __arm_vld2q (int8_t const * __addr)
32414 {
32415  return __arm_vld2q_s8 (__addr);
32416 }
32417 
32418 __extension__ extern __inline uint8x16x2_t
32419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32420 __arm_vld2q (uint8_t const * __addr)
32421 {
32422  return __arm_vld2q_u8 (__addr);
32423 }
32424 
32425 __extension__ extern __inline int8x16x4_t
32426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32427 __arm_vld4q (int8_t const * __addr)
32428 {
32429  return __arm_vld4q_s8 (__addr);
32430 }
32431 
32432 __extension__ extern __inline uint8x16x4_t
32433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32434 __arm_vld4q (uint8_t const * __addr)
32435 {
32436  return __arm_vld4q_u8 (__addr);
32437 }
32438 
32439 __extension__ extern __inline void
32440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32441 __arm_vst1q_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
32442 {
32443  __arm_vst1q_p_u16 (__addr, __value, __p);
32444 }
32445 
32446 __extension__ extern __inline void
32447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32448 __arm_vst1q_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
32449 {
32450  __arm_vst1q_p_s16 (__addr, __value, __p);
32451 }
32452 
32453 __extension__ extern __inline void
32454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32455 __arm_vst2q (int16_t * __addr, int16x8x2_t __value)
32456 {
32457  __arm_vst2q_s16 (__addr, __value);
32458 }
32459 
32460 __extension__ extern __inline void
32461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32462 __arm_vst2q (uint16_t * __addr, uint16x8x2_t __value)
32463 {
32464  __arm_vst2q_u16 (__addr, __value);
32465 }
32466 
32467 __extension__ extern __inline uint16x8_t
32468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32469 __arm_vld1q_z (uint16_t const *__base, mve_pred16_t __p)
32470 {
32471  return __arm_vld1q_z_u16 (__base, __p);
32472 }
32473 
32474 __extension__ extern __inline int16x8_t
32475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32476 __arm_vld1q_z (int16_t const *__base, mve_pred16_t __p)
32477 {
32478  return __arm_vld1q_z_s16 (__base, __p);
32479 }
32480 
32481 __extension__ extern __inline int16x8x2_t
32482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32483 __arm_vld2q (int16_t const * __addr)
32484 {
32485  return __arm_vld2q_s16 (__addr);
32486 }
32487 
32488 __extension__ extern __inline uint16x8x2_t
32489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32490 __arm_vld2q (uint16_t const * __addr)
32491 {
32492  return __arm_vld2q_u16 (__addr);
32493 }
32494 
32495 __extension__ extern __inline int16x8x4_t
32496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32497 __arm_vld4q (int16_t const * __addr)
32498 {
32499  return __arm_vld4q_s16 (__addr);
32500 }
32501 
32502 __extension__ extern __inline uint16x8x4_t
32503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32504 __arm_vld4q (uint16_t const * __addr)
32505 {
32506  return __arm_vld4q_u16 (__addr);
32507 }
32508 
32509 __extension__ extern __inline void
32510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32511 __arm_vst1q_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
32512 {
32513  __arm_vst1q_p_u32 (__addr, __value, __p);
32514 }
32515 
32516 __extension__ extern __inline void
32517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32518 __arm_vst1q_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
32519 {
32520  __arm_vst1q_p_s32 (__addr, __value, __p);
32521 }
32522 
32523 __extension__ extern __inline void
32524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32525 __arm_vst2q (int32_t * __addr, int32x4x2_t __value)
32526 {
32527  __arm_vst2q_s32 (__addr, __value);
32528 }
32529 
32530 __extension__ extern __inline void
32531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32532 __arm_vst2q (uint32_t * __addr, uint32x4x2_t __value)
32533 {
32534  __arm_vst2q_u32 (__addr, __value);
32535 }
32536 
32537 __extension__ extern __inline uint32x4_t
32538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32539 __arm_vld1q_z (uint32_t const *__base, mve_pred16_t __p)
32540 {
32541  return __arm_vld1q_z_u32 (__base, __p);
32542 }
32543 
32544 __extension__ extern __inline int32x4_t
32545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32546 __arm_vld1q_z (int32_t const *__base, mve_pred16_t __p)
32547 {
32548  return __arm_vld1q_z_s32 (__base, __p);
32549 }
32550 
32551 __extension__ extern __inline int32x4x2_t
32552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32553 __arm_vld2q (int32_t const * __addr)
32554 {
32555  return __arm_vld2q_s32 (__addr);
32556 }
32557 
32558 __extension__ extern __inline uint32x4x2_t
32559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32560 __arm_vld2q (uint32_t const * __addr)
32561 {
32562  return __arm_vld2q_u32 (__addr);
32563 }
32564 
32565 __extension__ extern __inline int32x4x4_t
32566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32567 __arm_vld4q (int32_t const * __addr)
32568 {
32569  return __arm_vld4q_s32 (__addr);
32570 }
32571 
32572 __extension__ extern __inline uint32x4x4_t
32573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32574 __arm_vld4q (uint32_t const * __addr)
32575 {
32576  return __arm_vld4q_u32 (__addr);
32577 }
32578 
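/* vsetq_lane and vgetq_lane insert or extract a single scalar at lane
   __idx, which must be a constant within the vector's lane count.  For
   example (illustrative, __v is a placeholder int16x8_t):

     int16x8_t __v2 = vsetq_lane ((int16_t) 7, __v, 0);
     int16_t __x = vgetq_lane (__v2, 0);    // __x == 7
 */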
32579 __extension__ extern __inline int16x8_t
32580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32581 __arm_vsetq_lane (int16_t __a, int16x8_t __b, const int __idx)
32582 {
32583  return __arm_vsetq_lane_s16 (__a, __b, __idx);
32584 }
32585 
32586 __extension__ extern __inline int32x4_t
32587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32588 __arm_vsetq_lane (int32_t __a, int32x4_t __b, const int __idx)
32589 {
32590  return __arm_vsetq_lane_s32 (__a, __b, __idx);
32591 }
32592 
32593 __extension__ extern __inline int8x16_t
32594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32595 __arm_vsetq_lane (int8_t __a, int8x16_t __b, const int __idx)
32596 {
32597  return __arm_vsetq_lane_s8 (__a, __b, __idx);
32598 }
32599 
32600 __extension__ extern __inline int64x2_t
32601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32602 __arm_vsetq_lane (int64_t __a, int64x2_t __b, const int __idx)
32603 {
32604  return __arm_vsetq_lane_s64 (__a, __b, __idx);
32605 }
32606 
32607 __extension__ extern __inline uint8x16_t
32608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32609 __arm_vsetq_lane (uint8_t __a, uint8x16_t __b, const int __idx)
32610 {
32611  return __arm_vsetq_lane_u8 (__a, __b, __idx);
32612 }
32613 
32614 __extension__ extern __inline uint16x8_t
32615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32616 __arm_vsetq_lane (uint16_t __a, uint16x8_t __b, const int __idx)
32617 {
32618  return __arm_vsetq_lane_u16 (__a, __b, __idx);
32619 }
32620 
32621 __extension__ extern __inline uint32x4_t
32622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32623 __arm_vsetq_lane (uint32_t __a, uint32x4_t __b, const int __idx)
32624 {
32625  return __arm_vsetq_lane_u32 (__a, __b, __idx);
32626 }
32627 
32628 __extension__ extern __inline uint64x2_t
32629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32630 __arm_vsetq_lane (uint64_t __a, uint64x2_t __b, const int __idx)
32631 {
32632  return __arm_vsetq_lane_u64 (__a, __b, __idx);
32633 }
32634 
32635 __extension__ extern __inline int16_t
32636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32637 __arm_vgetq_lane (int16x8_t __a, const int __idx)
32638 {
32639  return __arm_vgetq_lane_s16 (__a, __idx);
32640 }
32641 
32642 __extension__ extern __inline int32_t
32643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32644 __arm_vgetq_lane (int32x4_t __a, const int __idx)
32645 {
32646  return __arm_vgetq_lane_s32 (__a, __idx);
32647 }
32648 
32649 __extension__ extern __inline int8_t
32650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32651 __arm_vgetq_lane (int8x16_t __a, const int __idx)
32652 {
32653  return __arm_vgetq_lane_s8 (__a, __idx);
32654 }
32655 
32656 __extension__ extern __inline int64_t
32657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32658 __arm_vgetq_lane (int64x2_t __a, const int __idx)
32659 {
32660  return __arm_vgetq_lane_s64 (__a, __idx);
32661 }
32662 
32663 __extension__ extern __inline uint8_t
32664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32665 __arm_vgetq_lane (uint8x16_t __a, const int __idx)
32666 {
32667  return __arm_vgetq_lane_u8 (__a, __idx);
32668 }
32669 
32670 __extension__ extern __inline uint16_t
32671 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32672 __arm_vgetq_lane (uint16x8_t __a, const int __idx)
32673 {
32674  return __arm_vgetq_lane_u16 (__a, __idx);
32675 }
32676 
32677 __extension__ extern __inline uint32_t
32678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32679 __arm_vgetq_lane (uint32x4_t __a, const int __idx)
32680 {
32681  return __arm_vgetq_lane_u32 (__a, __idx);
32682 }
32683 
32684 __extension__ extern __inline uint64_t
32685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32686 __arm_vgetq_lane (uint64x2_t __a, const int __idx)
32687 {
32688  return __arm_vgetq_lane_u64 (__a, __idx);
32689 }
32690 
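/* vshlcq_m is the predicated whole-vector shift left with carry (VSHLC):
   elements are shifted left by __imm bits with the bits shifted out of
   one element carried into the next, *__b supplying the incoming bits
   and receiving the final carry-out.  Semantics paraphrased here; the
   Armv8.1-M MVE reference has the exact lane ordering.  */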
32691 __extension__ extern __inline int8x16_t
32692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32693 __arm_vshlcq_m (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32694 {
32695  return __arm_vshlcq_m_s8 (__a, __b, __imm, __p);
32696 }
32697 
32698 __extension__ extern __inline uint8x16_t
32699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32700 __arm_vshlcq_m (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32701 {
32702  return __arm_vshlcq_m_u8 (__a, __b, __imm, __p);
32703 }
32704 
32705 __extension__ extern __inline int16x8_t
32706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32707 __arm_vshlcq_m (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32708 {
32709  return __arm_vshlcq_m_s16 (__a, __b, __imm, __p);
32710 }
32711 
32712 __extension__ extern __inline uint16x8_t
32713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32714 __arm_vshlcq_m (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32715 {
32716  return __arm_vshlcq_m_u16 (__a, __b, __imm, __p);
32717 }
32718 
32719 __extension__ extern __inline int32x4_t
32720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32721 __arm_vshlcq_m (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32722 {
32723  return __arm_vshlcq_m_s32 (__a, __b, __imm, __p);
32724 }
32725 
32726 __extension__ extern __inline uint32x4_t
32727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32728 __arm_vshlcq_m (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
32729 {
32730  return __arm_vshlcq_m_u32 (__a, __b, __imm, __p);
32731 }
32732 
32733 #if (__ARM_FEATURE_MVE & 2)  /* MVE Floating point.  */
32734 
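/* The remaining overloads are only built when the target also has the MVE
   floating-point extension (bit 1 of __ARM_FEATURE_MVE).  They cover
   float16x8_t and float32x4_t and follow the same pattern as the integer
   wrappers above, forwarding to the _f16 or _f32 suffixed intrinsics.
   The vrnd*q family rounds to integral values under different rounding
   modes: vrndnq to nearest (ties to even), vrndmq towards minus infinity,
   vrndpq towards plus infinity, vrndaq to nearest (ties away from zero),
   vrndq towards zero and vrndxq using the current rounding mode.  */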
32735 __extension__ extern __inline void
32736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32737 __arm_vst4q (float16_t * __addr, float16x8x4_t __value)
32738 {
32739  __arm_vst4q_f16 (__addr, __value);
32740 }
32741 
32742 __extension__ extern __inline void
32743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32744 __arm_vst4q (float32_t * __addr, float32x4x4_t __value)
32745 {
32746  __arm_vst4q_f32 (__addr, __value);
32747 }
32748 
32749 __extension__ extern __inline float16x8_t
32750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32751 __arm_vrndxq (float16x8_t __a)
32752 {
32753  return __arm_vrndxq_f16 (__a);
32754 }
32755 
32756 __extension__ extern __inline float32x4_t
32757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32758 __arm_vrndxq (float32x4_t __a)
32759 {
32760  return __arm_vrndxq_f32 (__a);
32761 }
32762 
32763 __extension__ extern __inline float16x8_t
32764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32765 __arm_vrndq (float16x8_t __a)
32766 {
32767  return __arm_vrndq_f16 (__a);
32768 }
32769 
32770 __extension__ extern __inline float32x4_t
32771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32772 __arm_vrndq (float32x4_t __a)
32773 {
32774  return __arm_vrndq_f32 (__a);
32775 }
32776 
32777 __extension__ extern __inline float16x8_t
32778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32779 __arm_vrndpq (float16x8_t __a)
32780 {
32781  return __arm_vrndpq_f16 (__a);
32782 }
32783 
32784 __extension__ extern __inline float32x4_t
32785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32786 __arm_vrndpq (float32x4_t __a)
32787 {
32788  return __arm_vrndpq_f32 (__a);
32789 }
32790 
32791 __extension__ extern __inline float16x8_t
32792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32793 __arm_vrndnq (float16x8_t __a)
32794 {
32795  return __arm_vrndnq_f16 (__a);
32796 }
32797 
32798 __extension__ extern __inline float32x4_t
32799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32800 __arm_vrndnq (float32x4_t __a)
32801 {
32802  return __arm_vrndnq_f32 (__a);
32803 }
32804 
32805 __extension__ extern __inline float16x8_t
32806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32807 __arm_vrndmq (float16x8_t __a)
32808 {
32809  return __arm_vrndmq_f16 (__a);
32810 }
32811 
32812 __extension__ extern __inline float32x4_t
32813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32814 __arm_vrndmq (float32x4_t __a)
32815 {
32816  return __arm_vrndmq_f32 (__a);
32817 }
32818 
32819 __extension__ extern __inline float16x8_t
32820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32821 __arm_vrndaq (float16x8_t __a)
32822 {
32823  return __arm_vrndaq_f16 (__a);
32824 }
32825 
32826 __extension__ extern __inline float32x4_t
32827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32828 __arm_vrndaq (float32x4_t __a)
32829 {
32830  return __arm_vrndaq_f32 (__a);
32831 }
32832 
32833 __extension__ extern __inline float16x8_t
32834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32835 __arm_vrev64q (float16x8_t __a)
32836 {
32837  return __arm_vrev64q_f16 (__a);
32838 }
32839 
32840 __extension__ extern __inline float32x4_t
32841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32842 __arm_vrev64q (float32x4_t __a)
32843 {
32844  return __arm_vrev64q_f32 (__a);
32845 }
32846 
32847 __extension__ extern __inline float16x8_t
32848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32849 __arm_vnegq (float16x8_t __a)
32850 {
32851  return __arm_vnegq_f16 (__a);
32852 }
32853 
32854 __extension__ extern __inline float32x4_t
32855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32856 __arm_vnegq (float32x4_t __a)
32857 {
32858  return __arm_vnegq_f32 (__a);
32859 }
32860 
32861 __extension__ extern __inline float16x8_t
32862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32863 __arm_vdupq_n (float16_t __a)
32864 {
32865  return __arm_vdupq_n_f16 (__a);
32866 }
32867 
32868 __extension__ extern __inline float32x4_t
32869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32870 __arm_vdupq_n (float32_t __a)
32871 {
32872  return __arm_vdupq_n_f32 (__a);
32873 }
32874 
32875 __extension__ extern __inline float16x8_t
32876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32877 __arm_vabsq (float16x8_t __a)
32878 {
32879  return __arm_vabsq_f16 (__a);
32880 }
32881 
32882 __extension__ extern __inline float32x4_t
32883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32884 __arm_vabsq (float32x4_t __a)
32885 {
32886  return __arm_vabsq_f32 (__a);
32887 }
32888 
32889 __extension__ extern __inline float16x8_t
32890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32891 __arm_vrev32q (float16x8_t __a)
32892 {
32893  return __arm_vrev32q_f16 (__a);
32894 }
32895 
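/* Conversions: vcvttq_f32 and vcvtbq_f32 widen the top or bottom float16
   elements of a float16x8_t to a float32x4_t, vcvtq converts between
   integer and floating-point vectors, and vcvtq_n treats the integer
   input as fixed-point with __imm6 fractional bits.  A small sketch
   (__s is a placeholder int32x4_t):

     float32x4_t __f = vcvtq (__s);         // plain int -> float
     float32x4_t __g = vcvtq_n (__s, 16);   // interpret __s as Q16.16
 */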
32896 __extension__ extern __inline float32x4_t
32897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32898 __arm_vcvttq_f32 (float16x8_t __a)
32899 {
32900  return __arm_vcvttq_f32_f16 (__a);
32901 }
32902 
32903 __extension__ extern __inline float32x4_t
32904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32905 __arm_vcvtbq_f32 (float16x8_t __a)
32906 {
32907  return __arm_vcvtbq_f32_f16 (__a);
32908 }
32909 
32910 __extension__ extern __inline float16x8_t
32911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32912 __arm_vcvtq (int16x8_t __a)
32913 {
32914  return __arm_vcvtq_f16_s16 (__a);
32915 }
32916 
32917 __extension__ extern __inline float32x4_t
32918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32919 __arm_vcvtq (int32x4_t __a)
32920 {
32921  return __arm_vcvtq_f32_s32 (__a);
32922 }
32923 
32924 __extension__ extern __inline float16x8_t
32925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32926 __arm_vcvtq (uint16x8_t __a)
32927 {
32928  return __arm_vcvtq_f16_u16 (__a);
32929 }
32930 
32931 __extension__ extern __inline float32x4_t
32932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32933 __arm_vcvtq (uint32x4_t __a)
32934 {
32935  return __arm_vcvtq_f32_u32 (__a);
32936 }
32937 
32938 __extension__ extern __inline float16x8_t
32939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32940 __arm_vsubq (float16x8_t __a, float16_t __b)
32941 {
32942  return __arm_vsubq_n_f16 (__a, __b);
32943 }
32944 
32945 __extension__ extern __inline float32x4_t
32946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32947 __arm_vsubq (float32x4_t __a, float32_t __b)
32948 {
32949  return __arm_vsubq_n_f32 (__a, __b);
32950 }
32951 
32952 __extension__ extern __inline float16x8_t
32953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32954 __arm_vbrsrq (float16x8_t __a, int32_t __b)
32955 {
32956  return __arm_vbrsrq_n_f16 (__a, __b);
32957 }
32958 
32959 __extension__ extern __inline float32x4_t
32960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32961 __arm_vbrsrq (float32x4_t __a, int32_t __b)
32962 {
32963  return __arm_vbrsrq_n_f32 (__a, __b);
32964 }
32965 
32966 __extension__ extern __inline float16x8_t
32967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32968 __arm_vcvtq_n (int16x8_t __a, const int __imm6)
32969 {
32970  return __arm_vcvtq_n_f16_s16 (__a, __imm6);
32971 }
32972 
32973 __extension__ extern __inline float32x4_t
32974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32975 __arm_vcvtq_n (int32x4_t __a, const int __imm6)
32976 {
32977  return __arm_vcvtq_n_f32_s32 (__a, __imm6);
32978 }
32979 
32980 __extension__ extern __inline float16x8_t
32981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32982 __arm_vcvtq_n (uint16x8_t __a, const int __imm6)
32983 {
32984  return __arm_vcvtq_n_f16_u16 (__a, __imm6);
32985 }
32986 
32987 __extension__ extern __inline float32x4_t
32988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32989 __arm_vcvtq_n (uint32x4_t __a, const int __imm6)
32990 {
32991  return __arm_vcvtq_n_f32_u32 (__a, __imm6);
32992 }
32993 
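/* The floating-point compares return an mve_pred16_t mask (one bit per
   byte lane), so the result can be passed straight back as the __p
   argument of any predicated (_m/_x/_p/_z) intrinsic.  A scalar second
   operand selects the broadcasting _n_ form.  Sketch (__va and __vb are
   placeholder float16x8_t values):

     mve_pred16_t __p = vcmpgtq (__va, __vb);      // per-lane __va > __vb
     float16x8_t __r = vsubq_x (__va, __vb, __p);  // only where __p is set
 */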
32994 __extension__ extern __inline mve_pred16_t
32995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32996 __arm_vcmpneq (float16x8_t __a, float16_t __b)
32997 {
32998  return __arm_vcmpneq_n_f16 (__a, __b);
32999 }
33000 
33001 __extension__ extern __inline mve_pred16_t
33002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33003 __arm_vcmpneq (float16x8_t __a, float16x8_t __b)
33004 {
33005  return __arm_vcmpneq_f16 (__a, __b);
33006 }
33007 
33008 __extension__ extern __inline mve_pred16_t
33009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33010 __arm_vcmpltq (float16x8_t __a, float16_t __b)
33011 {
33012  return __arm_vcmpltq_n_f16 (__a, __b);
33013 }
33014 
33015 __extension__ extern __inline mve_pred16_t
33016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33017 __arm_vcmpltq (float16x8_t __a, float16x8_t __b)
33018 {
33019  return __arm_vcmpltq_f16 (__a, __b);
33020 }
33021 
33022 __extension__ extern __inline mve_pred16_t
33023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33024 __arm_vcmpleq (float16x8_t __a, float16_t __b)
33025 {
33026  return __arm_vcmpleq_n_f16 (__a, __b);
33027 }
33028 
33029 __extension__ extern __inline mve_pred16_t
33030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33031 __arm_vcmpleq (float16x8_t __a, float16x8_t __b)
33032 {
33033  return __arm_vcmpleq_f16 (__a, __b);
33034 }
33035 
33036 __extension__ extern __inline mve_pred16_t
33037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33038 __arm_vcmpgtq (float16x8_t __a, float16_t __b)
33039 {
33040  return __arm_vcmpgtq_n_f16 (__a, __b);
33041 }
33042 
33043 __extension__ extern __inline mve_pred16_t
33044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33045 __arm_vcmpgtq (float16x8_t __a, float16x8_t __b)
33046 {
33047  return __arm_vcmpgtq_f16 (__a, __b);
33048 }
33049 
33050 __extension__ extern __inline mve_pred16_t
33051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33052 __arm_vcmpgeq (float16x8_t __a, float16_t __b)
33053 {
33054  return __arm_vcmpgeq_n_f16 (__a, __b);
33055 }
33056 
33057 __extension__ extern __inline mve_pred16_t
33058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33059 __arm_vcmpgeq (float16x8_t __a, float16x8_t __b)
33060 {
33061  return __arm_vcmpgeq_f16 (__a, __b);
33062 }
33063 
33064 __extension__ extern __inline mve_pred16_t
33065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33066 __arm_vcmpeqq (float16x8_t __a, float16_t __b)
33067 {
33068  return __arm_vcmpeqq_n_f16 (__a, __b);
33069 }
33070 
33071 __extension__ extern __inline mve_pred16_t
33072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33073 __arm_vcmpeqq (float16x8_t __a, float16x8_t __b)
33074 {
33075  return __arm_vcmpeqq_f16 (__a, __b);
33076 }
33077 
33078 __extension__ extern __inline float16x8_t
33079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33080 __arm_vsubq (float16x8_t __a, float16x8_t __b)
33081 {
33082  return __arm_vsubq_f16 (__a, __b);
33083 }
33084 
33085 __extension__ extern __inline float16x8_t
33086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33087 __arm_vorrq (float16x8_t __a, float16x8_t __b)
33088 {
33089  return __arm_vorrq_f16 (__a, __b);
33090 }
33091 
33092 __extension__ extern __inline float16x8_t
33093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33094 __arm_vornq (float16x8_t __a, float16x8_t __b)
33095 {
33096  return __arm_vornq_f16 (__a, __b);
33097 }
33098 
33099 __extension__ extern __inline float16x8_t
33100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33101 __arm_vmulq (float16x8_t __a, float16_t __b)
33102 {
33103  return __arm_vmulq_n_f16 (__a, __b);
33104 }
33105 
33106 __extension__ extern __inline float16x8_t
33107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33108 __arm_vmulq (float16x8_t __a, float16x8_t __b)
33109 {
33110  return __arm_vmulq_f16 (__a, __b);
33111 }
33112 
33113 __extension__ extern __inline float16_t
33114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33115 __arm_vminnmvq (float16_t __a, float16x8_t __b)
33116 {
33117  return __arm_vminnmvq_f16 (__a, __b);
33118 }
33119 
33120 __extension__ extern __inline float16x8_t
33121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33122 __arm_vminnmq (float16x8_t __a, float16x8_t __b)
33123 {
33124  return __arm_vminnmq_f16 (__a, __b);
33125 }
33126 
33127 __extension__ extern __inline float16_t
33128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33129 __arm_vminnmavq (float16_t __a, float16x8_t __b)
33130 {
33131  return __arm_vminnmavq_f16 (__a, __b);
33132 }
33133 
33134 __extension__ extern __inline float16x8_t
33135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33136 __arm_vminnmaq (float16x8_t __a, float16x8_t __b)
33137 {
33138  return __arm_vminnmaq_f16 (__a, __b);
33139 }
33140 
33141 __extension__ extern __inline float16_t
33142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33143 __arm_vmaxnmvq (float16_t __a, float16x8_t __b)
33144 {
33145  return __arm_vmaxnmvq_f16 (__a, __b);
33146 }
33147 
33148 __extension__ extern __inline float16x8_t
33149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33150 __arm_vmaxnmq (float16x8_t __a, float16x8_t __b)
33151 {
33152  return __arm_vmaxnmq_f16 (__a, __b);
33153 }
33154 
33155 __extension__ extern __inline float16_t
33156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33157 __arm_vmaxnmavq (float16_t __a, float16x8_t __b)
33158 {
33159  return __arm_vmaxnmavq_f16 (__a, __b);
33160 }
33161 
33162 __extension__ extern __inline float16x8_t
33163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33164 __arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
33165 {
33166  return __arm_vmaxnmaq_f16 (__a, __b);
33167 }
33168 
33169 __extension__ extern __inline float16x8_t
33170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33171 __arm_veorq (float16x8_t __a, float16x8_t __b)
33172 {
33173  return __arm_veorq_f16 (__a, __b);
33174 }
33175 
33176 __extension__ extern __inline float16x8_t
33177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33178 __arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
33179 {
33180  return __arm_vcmulq_rot90_f16 (__a, __b);
33181 }
33182 
33183 __extension__ extern __inline float16x8_t
33184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33185 __arm_vcmulq_rot270 (float16x8_t __a, float16x8_t __b)
33186 {
33187  return __arm_vcmulq_rot270_f16 (__a, __b);
33188 }
33189 
33190 __extension__ extern __inline float16x8_t
33191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33192 __arm_vcmulq_rot180 (float16x8_t __a, float16x8_t __b)
33193 {
33194  return __arm_vcmulq_rot180_f16 (__a, __b);
33195 }
33196 
33197 __extension__ extern __inline float16x8_t
33198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33199 __arm_vcmulq (float16x8_t __a, float16x8_t __b)
33200 {
33201  return __arm_vcmulq_f16 (__a, __b);
33202 }
33203 
33204 __extension__ extern __inline float16x8_t
33205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33206 __arm_vcaddq_rot90 (float16x8_t __a, float16x8_t __b)
33207 {
33208  return __arm_vcaddq_rot90_f16 (__a, __b);
33209 }
33210 
33211 __extension__ extern __inline float16x8_t
33212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33213 __arm_vcaddq_rot270 (float16x8_t __a, float16x8_t __b)
33214 {
33215  return __arm_vcaddq_rot270_f16 (__a, __b);
33216 }
33217 
33218 __extension__ extern __inline float16x8_t
33219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33220 __arm_vbicq (float16x8_t __a, float16x8_t __b)
33221 {
33222  return __arm_vbicq_f16 (__a, __b);
33223 }
33224 
33225 __extension__ extern __inline float16x8_t
33226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33227 __arm_vandq (float16x8_t __a, float16x8_t __b)
33228 {
33229  return __arm_vandq_f16 (__a, __b);
33230 }
33231 
33232 __extension__ extern __inline float16x8_t
33233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33234 __arm_vaddq (float16x8_t __a, float16_t __b)
33235 {
33236  return __arm_vaddq_n_f16 (__a, __b);
33237 }
33238 
33239 __extension__ extern __inline float16x8_t
33240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33241 __arm_vabdq (float16x8_t __a, float16x8_t __b)
33242 {
33243  return __arm_vabdq_f16 (__a, __b);
33244 }
33245 
33246 __extension__ extern __inline mve_pred16_t
33247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33248 __arm_vcmpneq (float32x4_t __a, float32_t __b)
33249 {
33250  return __arm_vcmpneq_n_f32 (__a, __b);
33251 }
33252 
33253 __extension__ extern __inline mve_pred16_t
33254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33255 __arm_vcmpneq (float32x4_t __a, float32x4_t __b)
33256 {
33257  return __arm_vcmpneq_f32 (__a, __b);
33258 }
33259 
33260 __extension__ extern __inline mve_pred16_t
33261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33262 __arm_vcmpltq (float32x4_t __a, float32_t __b)
33263 {
33264  return __arm_vcmpltq_n_f32 (__a, __b);
33265 }
33266 
33267 __extension__ extern __inline mve_pred16_t
33268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33269 __arm_vcmpltq (float32x4_t __a, float32x4_t __b)
33270 {
33271  return __arm_vcmpltq_f32 (__a, __b);
33272 }
33273 
33274 __extension__ extern __inline mve_pred16_t
33275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33276 __arm_vcmpleq (float32x4_t __a, float32_t __b)
33277 {
33278  return __arm_vcmpleq_n_f32 (__a, __b);
33279 }
33280 
33281 __extension__ extern __inline mve_pred16_t
33282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33283 __arm_vcmpleq (float32x4_t __a, float32x4_t __b)
33284 {
33285  return __arm_vcmpleq_f32 (__a, __b);
33286 }
33287 
33288 __extension__ extern __inline mve_pred16_t
33289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33290 __arm_vcmpgtq (float32x4_t __a, float32_t __b)
33291 {
33292  return __arm_vcmpgtq_n_f32 (__a, __b);
33293 }
33294 
33295 __extension__ extern __inline mve_pred16_t
33296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33297 __arm_vcmpgtq (float32x4_t __a, float32x4_t __b)
33298 {
33299  return __arm_vcmpgtq_f32 (__a, __b);
33300 }
33301 
33302 __extension__ extern __inline mve_pred16_t
33303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33304 __arm_vcmpgeq (float32x4_t __a, float32_t __b)
33305 {
33306  return __arm_vcmpgeq_n_f32 (__a, __b);
33307 }
33308 
33309 __extension__ extern __inline mve_pred16_t
33310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33311 __arm_vcmpgeq (float32x4_t __a, float32x4_t __b)
33312 {
33313  return __arm_vcmpgeq_f32 (__a, __b);
33314 }
33315 
33316 __extension__ extern __inline mve_pred16_t
33317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33318 __arm_vcmpeqq (float32x4_t __a, float32_t __b)
33319 {
33320  return __arm_vcmpeqq_n_f32 (__a, __b);
33321 }
33322 
33323 __extension__ extern __inline mve_pred16_t
33324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33325 __arm_vcmpeqq (float32x4_t __a, float32x4_t __b)
33326 {
33327  return __arm_vcmpeqq_f32 (__a, __b);
33328 }
33329 
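/* Usage sketch (illustrative only; values and variable names are
   hypothetical): the polymorphic float32 comparison wrappers above return an
   mve_pred16_t lane mask and pick the _n_ variant when the second operand is
   a scalar.

     float32x4_t va = vdupq_n_f32 (1.0f);
     mve_pred16_t gt = vcmpgtq (va, 0.0f);   // scalar form -> __arm_vcmpgtq_n_f32
     mve_pred16_t eq = vcmpeqq (va, va);     // vector form -> __arm_vcmpeqq_f32
*/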
33330 __extension__ extern __inline float32x4_t
33331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33332 __arm_vsubq (float32x4_t __a, float32x4_t __b)
33333 {
33334  return __arm_vsubq_f32 (__a, __b);
33335 }
33336 
33337 __extension__ extern __inline float32x4_t
33338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33339 __arm_vorrq (float32x4_t __a, float32x4_t __b)
33340 {
33341  return __arm_vorrq_f32 (__a, __b);
33342 }
33343 
33344 __extension__ extern __inline float32x4_t
33345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33346 __arm_vornq (float32x4_t __a, float32x4_t __b)
33347 {
33348  return __arm_vornq_f32 (__a, __b);
33349 }
33350 
33351 __extension__ extern __inline float32x4_t
33352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33353 __arm_vmulq (float32x4_t __a, float32_t __b)
33354 {
33355  return __arm_vmulq_n_f32 (__a, __b);
33356 }
33357 
33358 __extension__ extern __inline float32x4_t
33359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33360 __arm_vmulq (float32x4_t __a, float32x4_t __b)
33361 {
33362  return __arm_vmulq_f32 (__a, __b);
33363 }
33364 
33365 __extension__ extern __inline float32_t
33366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33367 __arm_vminnmvq (float32_t __a, float32x4_t __b)
33368 {
33369  return __arm_vminnmvq_f32 (__a, __b);
33370 }
33371 
33372 __extension__ extern __inline float32x4_t
33373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33374 __arm_vminnmq (float32x4_t __a, float32x4_t __b)
33375 {
33376  return __arm_vminnmq_f32 (__a, __b);
33377 }
33378 
33379 __extension__ extern __inline float32_t
33380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33381 __arm_vminnmavq (float32_t __a, float32x4_t __b)
33382 {
33383  return __arm_vminnmavq_f32 (__a, __b);
33384 }
33385 
33386 __extension__ extern __inline float32x4_t
33387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33388 __arm_vminnmaq (float32x4_t __a, float32x4_t __b)
33389 {
33390  return __arm_vminnmaq_f32 (__a, __b);
33391 }
33392 
33393 __extension__ extern __inline float32_t
33394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33395 __arm_vmaxnmvq (float32_t __a, float32x4_t __b)
33396 {
33397  return __arm_vmaxnmvq_f32 (__a, __b);
33398 }
33399 
33400 __extension__ extern __inline float32x4_t
33401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33402 __arm_vmaxnmq (float32x4_t __a, float32x4_t __b)
33403 {
33404  return __arm_vmaxnmq_f32 (__a, __b);
33405 }
33406 
33407 __extension__ extern __inline float32_t
33408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33409 __arm_vmaxnmavq (float32_t __a, float32x4_t __b)
33410 {
33411  return __arm_vmaxnmavq_f32 (__a, __b);
33412 }
33413 
33414 __extension__ extern __inline float32x4_t
33415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33416 __arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
33417 {
33418  return __arm_vmaxnmaq_f32 (__a, __b);
33419 }
33420 
33421 __extension__ extern __inline float32x4_t
33422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33423 __arm_veorq (float32x4_t __a, float32x4_t __b)
33424 {
33425  return __arm_veorq_f32 (__a, __b);
33426 }
33427 
33428 __extension__ extern __inline float32x4_t
33429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33430 __arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
33431 {
33432  return __arm_vcmulq_rot90_f32 (__a, __b);
33433 }
33434 
33435 __extension__ extern __inline float32x4_t
33436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33437 __arm_vcmulq_rot270 (float32x4_t __a, float32x4_t __b)
33438 {
33439  return __arm_vcmulq_rot270_f32 (__a, __b);
33440 }
33441 
33442 __extension__ extern __inline float32x4_t
33443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33444 __arm_vcmulq_rot180 (float32x4_t __a, float32x4_t __b)
33445 {
33446  return __arm_vcmulq_rot180_f32 (__a, __b);
33447 }
33448 
33449 __extension__ extern __inline float32x4_t
33450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33451 __arm_vcmulq (float32x4_t __a, float32x4_t __b)
33452 {
33453  return __arm_vcmulq_f32 (__a, __b);
33454 }
33455 
33456 __extension__ extern __inline float32x4_t
33457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33458 __arm_vcaddq_rot90 (float32x4_t __a, float32x4_t __b)
33459 {
33460  return __arm_vcaddq_rot90_f32 (__a, __b);
33461 }
33462 
33463 __extension__ extern __inline float32x4_t
33464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33465 __arm_vcaddq_rot270 (float32x4_t __a, float32x4_t __b)
33466 {
33467  return __arm_vcaddq_rot270_f32 (__a, __b);
33468 }
33469 
33470 __extension__ extern __inline float32x4_t
33471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33472 __arm_vbicq (float32x4_t __a, float32x4_t __b)
33473 {
33474  return __arm_vbicq_f32 (__a, __b);
33475 }
33476 
33477 __extension__ extern __inline float32x4_t
33478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33479 __arm_vandq (float32x4_t __a, float32x4_t __b)
33480 {
33481  return __arm_vandq_f32 (__a, __b);
33482 }
33483 
33484 __extension__ extern __inline float32x4_t
33485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33486 __arm_vaddq (float32x4_t __a, float32_t __b)
33487 {
33488  return __arm_vaddq_n_f32 (__a, __b);
33489 }
33490 
33491 __extension__ extern __inline float32x4_t
33492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33493 __arm_vabdq (float32x4_t __a, float32x4_t __b)
33494 {
33495  return __arm_vabdq_f32 (__a, __b);
33496 }
33497 
33498 __extension__ extern __inline mve_pred16_t
33499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33500 __arm_vcmpeqq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33501 {
33502  return __arm_vcmpeqq_m_f16 (__a, __b, __p);
33503 }
33504 
33505 __extension__ extern __inline mve_pred16_t
33506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33507 __arm_vcmpeqq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
33508 {
33509  return __arm_vcmpeqq_m_f32 (__a, __b, __p);
33510 }
33511 
33512 __extension__ extern __inline int16x8_t
33513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33514 __arm_vcvtaq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33515 {
33516  return __arm_vcvtaq_m_s16_f16 (__inactive, __a, __p);
33517 }
33518 
33519 __extension__ extern __inline uint16x8_t
33520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33521 __arm_vcvtaq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33522 {
33523  return __arm_vcvtaq_m_u16_f16 (__inactive, __a, __p);
33524 }
33525 
33526 __extension__ extern __inline int32x4_t
33527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33528 __arm_vcvtaq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33529 {
33530  return __arm_vcvtaq_m_s32_f32 (__inactive, __a, __p);
33531 }
33532 
33533 __extension__ extern __inline uint32x4_t
33534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33535 __arm_vcvtaq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33536 {
33537  return __arm_vcvtaq_m_u32_f32 (__inactive, __a, __p);
33538 }
33539 
33540 __extension__ extern __inline float16x8_t
33541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33542 __arm_vcvtq_m (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
33543 {
33544  return __arm_vcvtq_m_f16_s16 (__inactive, __a, __p);
33545 }
33546 
33547 __extension__ extern __inline float16x8_t
33548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33549 __arm_vcvtq_m (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
33550 {
33551  return __arm_vcvtq_m_f16_u16 (__inactive, __a, __p);
33552 }
33553 
33554 __extension__ extern __inline float32x4_t
33555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33556 __arm_vcvtq_m (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
33557 {
33558  return __arm_vcvtq_m_f32_s32 (__inactive, __a, __p);
33559 }
33560 
33561 __extension__ extern __inline float32x4_t
33562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33563 __arm_vcvtq_m (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
33564 {
33565  return __arm_vcvtq_m_f32_u32 (__inactive, __a, __p);
33566 }
33567 
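/* Usage sketch (illustrative only; values and variable names are
   hypothetical): in the predicated _m conversions above, lanes whose
   predicate bits are clear are taken from __inactive instead of being
   converted.

     int32x4_t    keep = vdupq_n_s32 (0);
     float32x4_t  src  = vdupq_n_f32 (1.5f);
     mve_pred16_t p    = 0x00ff;                    // low two 32-bit lanes active
     int32x4_t    r    = vcvtaq_m (keep, src, p);   // -> __arm_vcvtaq_m_s32_f32
*/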
33568 __extension__ extern __inline float16x8_t
33569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33570 __arm_vcvtbq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
33571 {
33572  return __arm_vcvtbq_m_f16_f32 (__a, __b, __p);
33573 }
33574 
33575 __extension__ extern __inline float32x4_t
33576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33577 __arm_vcvtbq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
33578 {
33579  return __arm_vcvtbq_m_f32_f16 (__inactive, __a, __p);
33580 }
33581 
33582 __extension__ extern __inline float16x8_t
33583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33584 __arm_vcvttq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
33585 {
33586  return __arm_vcvttq_m_f16_f32 (__a, __b, __p);
33587 }
33588 
33589 __extension__ extern __inline float32x4_t
33590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33591 __arm_vcvttq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
33592 {
33593  return __arm_vcvttq_m_f32_f16 (__inactive, __a, __p);
33594 }
33595 
33596 __extension__ extern __inline float16x8_t
33597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33598 __arm_vrev32q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33599 {
33600  return __arm_vrev32q_m_f16 (__inactive, __a, __p);
33601 }
33602 
33603 __extension__ extern __inline float16x8_t
33604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33605 __arm_vcmlaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33606 {
33607  return __arm_vcmlaq_f16 (__a, __b, __c);
33608 }
33609 
33610 __extension__ extern __inline float16x8_t
33611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33612 __arm_vcmlaq_rot180 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33613 {
33614  return __arm_vcmlaq_rot180_f16 (__a, __b, __c);
33615 }
33616 
33617 __extension__ extern __inline float16x8_t
33618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33619 __arm_vcmlaq_rot270 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33620 {
33621  return __arm_vcmlaq_rot270_f16 (__a, __b, __c);
33622 }
33623 
33624 __extension__ extern __inline float16x8_t
33625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33626 __arm_vcmlaq_rot90 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33627 {
33628  return __arm_vcmlaq_rot90_f16 (__a, __b, __c);
33629 }
33630 
33631 __extension__ extern __inline float16x8_t
33632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33633 __arm_vfmaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33634 {
33635  return __arm_vfmaq_f16 (__a, __b, __c);
33636 }
33637 
33638 __extension__ extern __inline float16x8_t
33639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33640 __arm_vfmaq (float16x8_t __a, float16x8_t __b, float16_t __c)
33641 {
33642  return __arm_vfmaq_n_f16 (__a, __b, __c);
33643 }
33644 
33645 __extension__ extern __inline float16x8_t
33646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33647 __arm_vfmasq (float16x8_t __a, float16x8_t __b, float16_t __c)
33648 {
33649  return __arm_vfmasq_n_f16 (__a, __b, __c);
33650 }
33651 
33652 __extension__ extern __inline float16x8_t
33653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33654 __arm_vfmsq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
33655 {
33656  return __arm_vfmsq_f16 (__a, __b, __c);
33657 }
33658 
33659 __extension__ extern __inline float16x8_t
33660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33661 __arm_vabsq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33662 {
33663  return __arm_vabsq_m_f16 (__inactive, __a, __p);
33664 }
33665 
33666 __extension__ extern __inline int16x8_t
33667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33668 __arm_vcvtmq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33669 {
33670  return __arm_vcvtmq_m_s16_f16 (__inactive, __a, __p);
33671 }
33672 
33673 __extension__ extern __inline int16x8_t
33674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33675 __arm_vcvtnq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33676 {
33677  return __arm_vcvtnq_m_s16_f16 (__inactive, __a, __p);
33678 }
33679 
33680 __extension__ extern __inline int16x8_t
33681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33682 __arm_vcvtpq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33683 {
33684  return __arm_vcvtpq_m_s16_f16 (__inactive, __a, __p);
33685 }
33686 
33687 __extension__ extern __inline int16x8_t
33688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33689 __arm_vcvtq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33690 {
33691  return __arm_vcvtq_m_s16_f16 (__inactive, __a, __p);
33692 }
33693 
33694 __extension__ extern __inline float16x8_t
33695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33696 __arm_vdupq_m (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
33697 {
33698  return __arm_vdupq_m_n_f16 (__inactive, __a, __p);
33699 }
33700 
33701 __extension__ extern __inline float16x8_t
33702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33703 __arm_vmaxnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33704 {
33705  return __arm_vmaxnmaq_m_f16 (__a, __b, __p);
33706 }
33707 
33708 __extension__ extern __inline float16_t
33709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33710 __arm_vmaxnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
33711 {
33712  return __arm_vmaxnmavq_p_f16 (__a, __b, __p);
33713 }
33714 
33715 __extension__ extern __inline float16_t
33716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33717 __arm_vmaxnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
33718 {
33719  return __arm_vmaxnmvq_p_f16 (__a, __b, __p);
33720 }
33721 
33722 __extension__ extern __inline float16x8_t
33723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33724 __arm_vminnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33725 {
33726  return __arm_vminnmaq_m_f16 (__a, __b, __p);
33727 }
33728 
33729 __extension__ extern __inline float16_t
33730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33731 __arm_vminnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
33732 {
33733  return __arm_vminnmavq_p_f16 (__a, __b, __p);
33734 }
33735 
33736 __extension__ extern __inline float16_t
33737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33738 __arm_vminnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
33739 {
33740  return __arm_vminnmvq_p_f16 (__a, __b, __p);
33741 }
33742 
33743 __extension__ extern __inline float16x8_t
33744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33745 __arm_vnegq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33746 {
33747  return __arm_vnegq_m_f16 (__inactive, __a, __p);
33748 }
33749 
33750 __extension__ extern __inline float16x8_t
33751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33752 __arm_vpselq (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33753 {
33754  return __arm_vpselq_f16 (__a, __b, __p);
33755 }
33756 
33757 __extension__ extern __inline float16x8_t
33758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33759 __arm_vrev64q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33760 {
33761  return __arm_vrev64q_m_f16 (__inactive, __a, __p);
33762 }
33763 
33764 __extension__ extern __inline float16x8_t
33765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33766 __arm_vrndaq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33767 {
33768  return __arm_vrndaq_m_f16 (__inactive, __a, __p);
33769 }
33770 
33771 __extension__ extern __inline float16x8_t
33772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33773 __arm_vrndmq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33774 {
33775  return __arm_vrndmq_m_f16 (__inactive, __a, __p);
33776 }
33777 
33778 __extension__ extern __inline float16x8_t
33779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33780 __arm_vrndnq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33781 {
33782  return __arm_vrndnq_m_f16 (__inactive, __a, __p);
33783 }
33784 
33785 __extension__ extern __inline float16x8_t
33786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33787 __arm_vrndpq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33788 {
33789  return __arm_vrndpq_m_f16 (__inactive, __a, __p);
33790 }
33791 
33792 __extension__ extern __inline float16x8_t
33793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33794 __arm_vrndq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33795 {
33796  return __arm_vrndq_m_f16 (__inactive, __a, __p);
33797 }
33798 
33799 __extension__ extern __inline float16x8_t
33800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33801 __arm_vrndxq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33802 {
33803  return __arm_vrndxq_m_f16 (__inactive, __a, __p);
33804 }
33805 
33806 __extension__ extern __inline mve_pred16_t
33807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33808 __arm_vcmpeqq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33809 {
33810  return __arm_vcmpeqq_m_n_f16 (__a, __b, __p);
33811 }
33812 
33813 __extension__ extern __inline mve_pred16_t
33814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33815 __arm_vcmpgeq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33816 {
33817  return __arm_vcmpgeq_m_f16 (__a, __b, __p);
33818 }
33819 
33820 __extension__ extern __inline mve_pred16_t
33821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33822 __arm_vcmpgeq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33823 {
33824  return __arm_vcmpgeq_m_n_f16 (__a, __b, __p);
33825 }
33826 
33827 __extension__ extern __inline mve_pred16_t
33828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33829 __arm_vcmpgtq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33830 {
33831  return __arm_vcmpgtq_m_f16 (__a, __b, __p);
33832 }
33833 
33834 __extension__ extern __inline mve_pred16_t
33835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33836 __arm_vcmpgtq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33837 {
33838  return __arm_vcmpgtq_m_n_f16 (__a, __b, __p);
33839 }
33840 
33841 __extension__ extern __inline mve_pred16_t
33842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33843 __arm_vcmpleq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33844 {
33845  return __arm_vcmpleq_m_f16 (__a, __b, __p);
33846 }
33847 
33848 __extension__ extern __inline mve_pred16_t
33849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33850 __arm_vcmpleq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33851 {
33852  return __arm_vcmpleq_m_n_f16 (__a, __b, __p);
33853 }
33854 
33855 __extension__ extern __inline mve_pred16_t
33856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33857 __arm_vcmpltq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33858 {
33859  return __arm_vcmpltq_m_f16 (__a, __b, __p);
33860 }
33861 
33862 __extension__ extern __inline mve_pred16_t
33863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33864 __arm_vcmpltq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33865 {
33866  return __arm_vcmpltq_m_n_f16 (__a, __b, __p);
33867 }
33868 
33869 __extension__ extern __inline mve_pred16_t
33870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33871 __arm_vcmpneq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33872 {
33873  return __arm_vcmpneq_m_f16 (__a, __b, __p);
33874 }
33875 
33876 __extension__ extern __inline mve_pred16_t
33877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33878 __arm_vcmpneq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33879 {
33880  return __arm_vcmpneq_m_n_f16 (__a, __b, __p);
33881 }
33882 
33883 __extension__ extern __inline uint16x8_t
33884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33885 __arm_vcvtmq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33886 {
33887  return __arm_vcvtmq_m_u16_f16 (__inactive, __a, __p);
33888 }
33889 
33890 __extension__ extern __inline uint16x8_t
33891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33892 __arm_vcvtnq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33893 {
33894  return __arm_vcvtnq_m_u16_f16 (__inactive, __a, __p);
33895 }
33896 
33897 __extension__ extern __inline uint16x8_t
33898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33899 __arm_vcvtpq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33900 {
33901  return __arm_vcvtpq_m_u16_f16 (__inactive, __a, __p);
33902 }
33903 
33904 __extension__ extern __inline uint16x8_t
33905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33906 __arm_vcvtq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33907 {
33908  return __arm_vcvtq_m_u16_f16 (__inactive, __a, __p);
33909 }
33910 
33911 __extension__ extern __inline float32x4_t
33912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33913 __arm_vcmlaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33914 {
33915  return __arm_vcmlaq_f32 (__a, __b, __c);
33916 }
33917 
33918 __extension__ extern __inline float32x4_t
33919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33920 __arm_vcmlaq_rot180 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33921 {
33922  return __arm_vcmlaq_rot180_f32 (__a, __b, __c);
33923 }
33924 
33925 __extension__ extern __inline float32x4_t
33926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33927 __arm_vcmlaq_rot270 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33928 {
33929  return __arm_vcmlaq_rot270_f32 (__a, __b, __c);
33930 }
33931 
33932 __extension__ extern __inline float32x4_t
33933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33934 __arm_vcmlaq_rot90 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33935 {
33936  return __arm_vcmlaq_rot90_f32 (__a, __b, __c);
33937 }
33938 
33939 __extension__ extern __inline float32x4_t
33940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33941 __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33942 {
33943  return __arm_vfmaq_f32 (__a, __b, __c);
33944 }
33945 
33946 __extension__ extern __inline float32x4_t
33947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33948 __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32_t __c)
33949 {
33950  return __arm_vfmaq_n_f32 (__a, __b, __c);
33951 }
33952 
33953 __extension__ extern __inline float32x4_t
33954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33955 __arm_vfmasq (float32x4_t __a, float32x4_t __b, float32_t __c)
33956 {
33957  return __arm_vfmasq_n_f32 (__a, __b, __c);
33958 }
33959 
33960 __extension__ extern __inline float32x4_t
33961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33962 __arm_vfmsq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33963 {
33964  return __arm_vfmsq_f32 (__a, __b, __c);
33965 }
33966 
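/* Usage sketch (illustrative only; values and variable names are
   hypothetical): vfmaq computes a fused multiply-add into its first
   argument, vfmsq the corresponding multiply-subtract, and the scalar
   third-operand form maps to the _n_ variant.

     float32x4_t acc = vdupq_n_f32 (0.0f);
     float32x4_t x   = vdupq_n_f32 (2.0f);
     acc = vfmaq (acc, x, x);       // acc += x * x  -> __arm_vfmaq_f32
     acc = vfmaq (acc, x, 3.0f);    // acc += x * 3  -> __arm_vfmaq_n_f32
*/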
33967 __extension__ extern __inline float32x4_t
33968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33969 __arm_vabsq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33970 {
33971  return __arm_vabsq_m_f32 (__inactive, __a, __p);
33972 }
33973 
33974 __extension__ extern __inline int32x4_t
33975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33976 __arm_vcvtmq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33977 {
33978  return __arm_vcvtmq_m_s32_f32 (__inactive, __a, __p);
33979 }
33980 
33981 __extension__ extern __inline int32x4_t
33982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33983 __arm_vcvtnq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33984 {
33985  return __arm_vcvtnq_m_s32_f32 (__inactive, __a, __p);
33986 }
33987 
33988 __extension__ extern __inline int32x4_t
33989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33990 __arm_vcvtpq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33991 {
33992  return __arm_vcvtpq_m_s32_f32 (__inactive, __a, __p);
33993 }
33994 
33995 __extension__ extern __inline int32x4_t
33996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33997 __arm_vcvtq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33998 {
33999  return __arm_vcvtq_m_s32_f32 (__inactive, __a, __p);
34000 }
34001 
34002 __extension__ extern __inline float32x4_t
34003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34004 __arm_vdupq_m (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
34005 {
34006  return __arm_vdupq_m_n_f32 (__inactive, __a, __p);
34007 }
34008 
34009 __extension__ extern __inline float32x4_t
34010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34011 __arm_vmaxnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34012 {
34013  return __arm_vmaxnmaq_m_f32 (__a, __b, __p);
34014 }
34015 
34016 __extension__ extern __inline float32_t
34017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34018 __arm_vmaxnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34019 {
34020  return __arm_vmaxnmavq_p_f32 (__a, __b, __p);
34021 }
34022 
34023 __extension__ extern __inline float32_t
34024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34025 __arm_vmaxnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34026 {
34027  return __arm_vmaxnmvq_p_f32 (__a, __b, __p);
34028 }
34029 
34030 __extension__ extern __inline float32x4_t
34031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34032 __arm_vminnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34033 {
34034  return __arm_vminnmaq_m_f32 (__a, __b, __p);
34035 }
34036 
34037 __extension__ extern __inline float32_t
34038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34039 __arm_vminnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34040 {
34041  return __arm_vminnmavq_p_f32 (__a, __b, __p);
34042 }
34043 
34044 __extension__ extern __inline float32_t
34045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34046 __arm_vminnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34047 {
34048  return __arm_vminnmvq_p_f32 (__a, __b, __p);
34049 }
34050 
34051 __extension__ extern __inline float32x4_t
34052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34053 __arm_vnegq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34054 {
34055  return __arm_vnegq_m_f32 (__inactive, __a, __p);
34056 }
34057 
34058 __extension__ extern __inline float32x4_t
34059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34060 __arm_vpselq (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34061 {
34062  return __arm_vpselq_f32 (__a, __b, __p);
34063 }
34064 
34065 __extension__ extern __inline float32x4_t
34066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34067 __arm_vrev64q_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34068 {
34069  return __arm_vrev64q_m_f32 (__inactive, __a, __p);
34070 }
34071 
34072 __extension__ extern __inline float32x4_t
34073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34074 __arm_vrndaq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34075 {
34076  return __arm_vrndaq_m_f32 (__inactive, __a, __p);
34077 }
34078 
34079 __extension__ extern __inline float32x4_t
34080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34081 __arm_vrndmq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34082 {
34083  return __arm_vrndmq_m_f32 (__inactive, __a, __p);
34084 }
34085 
34086 __extension__ extern __inline float32x4_t
34087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34088 __arm_vrndnq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34089 {
34090  return __arm_vrndnq_m_f32 (__inactive, __a, __p);
34091 }
34092 
34093 __extension__ extern __inline float32x4_t
34094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34095 __arm_vrndpq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34096 {
34097  return __arm_vrndpq_m_f32 (__inactive, __a, __p);
34098 }
34099 
34100 __extension__ extern __inline float32x4_t
34101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34102 __arm_vrndq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34103 {
34104  return __arm_vrndq_m_f32 (__inactive, __a, __p);
34105 }
34106 
34107 __extension__ extern __inline float32x4_t
34108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34109 __arm_vrndxq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34110 {
34111  return __arm_vrndxq_m_f32 (__inactive, __a, __p);
34112 }
34113 
34114 __extension__ extern __inline mve_pred16_t
34115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34116 __arm_vcmpeqq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34117 {
34118  return __arm_vcmpeqq_m_n_f32 (__a, __b, __p);
34119 }
34120 
34121 __extension__ extern __inline mve_pred16_t
34122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34123 __arm_vcmpgeq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34124 {
34125  return __arm_vcmpgeq_m_f32 (__a, __b, __p);
34126 }
34127 
34128 __extension__ extern __inline mve_pred16_t
34129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34130 __arm_vcmpgeq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34131 {
34132  return __arm_vcmpgeq_m_n_f32 (__a, __b, __p);
34133 }
34134 
34135 __extension__ extern __inline mve_pred16_t
34136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34137 __arm_vcmpgtq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34138 {
34139  return __arm_vcmpgtq_m_f32 (__a, __b, __p);
34140 }
34141 
34142 __extension__ extern __inline mve_pred16_t
34143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34144 __arm_vcmpgtq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34145 {
34146  return __arm_vcmpgtq_m_n_f32 (__a, __b, __p);
34147 }
34148 
34149 __extension__ extern __inline mve_pred16_t
34150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34151 __arm_vcmpleq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34152 {
34153  return __arm_vcmpleq_m_f32 (__a, __b, __p);
34154 }
34155 
34156 __extension__ extern __inline mve_pred16_t
34157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34158 __arm_vcmpleq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34159 {
34160  return __arm_vcmpleq_m_n_f32 (__a, __b, __p);
34161 }
34162 
34163 __extension__ extern __inline mve_pred16_t
34164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34165 __arm_vcmpltq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34166 {
34167  return __arm_vcmpltq_m_f32 (__a, __b, __p);
34168 }
34169 
34170 __extension__ extern __inline mve_pred16_t
34171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34172 __arm_vcmpltq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34173 {
34174  return __arm_vcmpltq_m_n_f32 (__a, __b, __p);
34175 }
34176 
34177 __extension__ extern __inline mve_pred16_t
34178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34179 __arm_vcmpneq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34180 {
34181  return __arm_vcmpneq_m_f32 (__a, __b, __p);
34182 }
34183 
34184 __extension__ extern __inline mve_pred16_t
34185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34186 __arm_vcmpneq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34187 {
34188  return __arm_vcmpneq_m_n_f32 (__a, __b, __p);
34189 }
34190 
34191 __extension__ extern __inline uint32x4_t
34192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34193 __arm_vcvtmq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34194 {
34195  return __arm_vcvtmq_m_u32_f32 (__inactive, __a, __p);
34196 }
34197 
34198 __extension__ extern __inline uint32x4_t
34199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34200 __arm_vcvtnq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34201 {
34202  return __arm_vcvtnq_m_u32_f32 (__inactive, __a, __p);
34203 }
34204 
34205 __extension__ extern __inline uint32x4_t
34206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34207 __arm_vcvtpq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34208 {
34209  return __arm_vcvtpq_m_u32_f32 (__inactive, __a, __p);
34210 }
34211 
34212 __extension__ extern __inline uint32x4_t
34213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34214 __arm_vcvtq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34215 {
34216  return __arm_vcvtq_m_u32_f32 (__inactive, __a, __p);
34217 }
34218 
34219 __extension__ extern __inline float16x8_t
34220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34221 __arm_vcvtq_m_n (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
34222 {
34223  return __arm_vcvtq_m_n_f16_u16 (__inactive, __a, __imm6, __p);
34224 }
34225 
34226 __extension__ extern __inline float16x8_t
34227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34228 __arm_vcvtq_m_n (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
34229 {
34230  return __arm_vcvtq_m_n_f16_s16 (__inactive, __a, __imm6, __p);
34231 }
34232 
34233 __extension__ extern __inline float32x4_t
34234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34235 __arm_vcvtq_m_n (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
34236 {
34237  return __arm_vcvtq_m_n_f32_u32 (__inactive, __a, __imm6, __p);
34238 }
34239 
34240 __extension__ extern __inline float32x4_t
34241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34242 __arm_vcvtq_m_n (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
34243 {
34244  return __arm_vcvtq_m_n_f32_s32 (__inactive, __a, __imm6, __p);
34245 }
34246 
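/* Usage sketch (illustrative only; values and variable names are
   hypothetical): in the _m_n conversions above, __imm6 gives the number of
   fractional bits of the fixed-point operand (assumed per ACLE: 1..16 for
   16-bit, 1..32 for 32-bit elements) and must be a compile-time constant.

     uint32x4_t   q16  = vdupq_n_u32 (1u << 16);         // 1.0 in Q16.16
     float32x4_t  zero = vdupq_n_f32 (0.0f);
     mve_pred16_t p    = 0xffff;                         // all lanes active
     float32x4_t  r    = vcvtq_m_n (zero, q16, 16, p);   // -> __arm_vcvtq_m_n_f32_u32
*/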
34247 __extension__ extern __inline float32x4_t
34248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34249 __arm_vabdq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34250 {
34251  return __arm_vabdq_m_f32 (__inactive, __a, __b, __p);
34252 }
34253 
34254 __extension__ extern __inline float16x8_t
34255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34256 __arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34257 {
34258  return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
34259 }
34260 
34261 __extension__ extern __inline float32x4_t
34262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34263 __arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34264 {
34265  return __arm_vaddq_m_f32 (__inactive, __a, __b, __p);
34266 }
34267 
34268 __extension__ extern __inline float16x8_t
34269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34270 __arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34271 {
34272  return __arm_vaddq_m_f16 (__inactive, __a, __b, __p);
34273 }
34274 
34275 __extension__ extern __inline float32x4_t
34276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34277 __arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34278 {
34279  return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p);
34280 }
34281 
34282 __extension__ extern __inline float16x8_t
34283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34284 __arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34285 {
34286  return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p);
34287 }
34288 
34289 __extension__ extern __inline float32x4_t
34290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34291 __arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34292 {
34293  return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
34294 }
34295 
34296 __extension__ extern __inline float16x8_t
34297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34298 __arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34299 {
34300  return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
34301 }
34302 
34303 __extension__ extern __inline float32x4_t
34304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34305 __arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34306 {
34307  return __arm_vbicq_m_f32 (__inactive, __a, __b, __p);
34308 }
34309 
34310 __extension__ extern __inline float16x8_t
34311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34312 __arm_vbicq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34313 {
34314  return __arm_vbicq_m_f16 (__inactive, __a, __b, __p);
34315 }
34316 
34317 __extension__ extern __inline float32x4_t
34318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34319 __arm_vbrsrq_m (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
34320 {
34321  return __arm_vbrsrq_m_n_f32 (__inactive, __a, __b, __p);
34322 }
34323 
34324 __extension__ extern __inline float16x8_t
34325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34326 __arm_vbrsrq_m (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
34327 {
34328  return __arm_vbrsrq_m_n_f16 (__inactive, __a, __b, __p);
34329 }
34330 
34331 __extension__ extern __inline float32x4_t
34332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34333 __arm_vcaddq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34334 {
34335  return __arm_vcaddq_rot270_m_f32 (__inactive, __a, __b, __p);
34336 }
34337 
34338 __extension__ extern __inline float16x8_t
34339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34340 __arm_vcaddq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34341 {
34342  return __arm_vcaddq_rot270_m_f16 (__inactive, __a, __b, __p);
34343 }
34344 
34345 __extension__ extern __inline float32x4_t
34346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34347 __arm_vcaddq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34348 {
34349  return __arm_vcaddq_rot90_m_f32 (__inactive, __a, __b, __p);
34350 }
34351 
34352 __extension__ extern __inline float16x8_t
34353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34354 __arm_vcaddq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34355 {
34356  return __arm_vcaddq_rot90_m_f16 (__inactive, __a, __b, __p);
34357 }
34358 
34359 __extension__ extern __inline float32x4_t
34360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34361 __arm_vcmlaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34362 {
34363  return __arm_vcmlaq_m_f32 (__a, __b, __c, __p);
34364 }
34365 
34366 __extension__ extern __inline float16x8_t
34367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34368 __arm_vcmlaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34369 {
34370  return __arm_vcmlaq_m_f16 (__a, __b, __c, __p);
34371 }
34372 
34373 __extension__ extern __inline float32x4_t
34374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34375 __arm_vcmlaq_rot180_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34376 {
34377  return __arm_vcmlaq_rot180_m_f32 (__a, __b, __c, __p);
34378 }
34379 
34380 __extension__ extern __inline float16x8_t
34381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34382 __arm_vcmlaq_rot180_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34383 {
34384  return __arm_vcmlaq_rot180_m_f16 (__a, __b, __c, __p);
34385 }
34386 
34387 __extension__ extern __inline float32x4_t
34388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34389 __arm_vcmlaq_rot270_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34390 {
34391  return __arm_vcmlaq_rot270_m_f32 (__a, __b, __c, __p);
34392 }
34393 
34394 __extension__ extern __inline float16x8_t
34395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34396 __arm_vcmlaq_rot270_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34397 {
34398  return __arm_vcmlaq_rot270_m_f16 (__a, __b, __c, __p);
34399 }
34400 
34401 __extension__ extern __inline float32x4_t
34402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34403 __arm_vcmlaq_rot90_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34404 {
34405  return __arm_vcmlaq_rot90_m_f32 (__a, __b, __c, __p);
34406 }
34407 
34408 __extension__ extern __inline float16x8_t
34409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34410 __arm_vcmlaq_rot90_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34411 {
34412  return __arm_vcmlaq_rot90_m_f16 (__a, __b, __c, __p);
34413 }
34414 
34415 __extension__ extern __inline float32x4_t
34416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34417 __arm_vcmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34418 {
34419  return __arm_vcmulq_m_f32 (__inactive, __a, __b, __p);
34420 }
34421 
34422 __extension__ extern __inline float16x8_t
34423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34424 __arm_vcmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34425 {
34426  return __arm_vcmulq_m_f16 (__inactive, __a, __b, __p);
34427 }
34428 
34429 __extension__ extern __inline float32x4_t
34430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34431 __arm_vcmulq_rot180_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34432 {
34433  return __arm_vcmulq_rot180_m_f32 (__inactive, __a, __b, __p);
34434 }
34435 
34436 __extension__ extern __inline float16x8_t
34437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34438 __arm_vcmulq_rot180_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34439 {
34440  return __arm_vcmulq_rot180_m_f16 (__inactive, __a, __b, __p);
34441 }
34442 
34443 __extension__ extern __inline float32x4_t
34444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34445 __arm_vcmulq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34446 {
34447  return __arm_vcmulq_rot270_m_f32 (__inactive, __a, __b, __p);
34448 }
34449 
34450 __extension__ extern __inline float16x8_t
34451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34452 __arm_vcmulq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34453 {
34454  return __arm_vcmulq_rot270_m_f16 (__inactive, __a, __b, __p);
34455 }
34456 
34457 __extension__ extern __inline float32x4_t
34458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34459 __arm_vcmulq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34460 {
34461  return __arm_vcmulq_rot90_m_f32 (__inactive, __a, __b, __p);
34462 }
34463 
34464 __extension__ extern __inline float16x8_t
34465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34466 __arm_vcmulq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34467 {
34468  return __arm_vcmulq_rot90_m_f16 (__inactive, __a, __b, __p);
34469 }
34470 
34471 __extension__ extern __inline int32x4_t
34472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34473 __arm_vcvtq_m_n (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
34474 {
34475  return __arm_vcvtq_m_n_s32_f32 (__inactive, __a, __imm6, __p);
34476 }
34477 
34478 __extension__ extern __inline int16x8_t
34479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34480 __arm_vcvtq_m_n (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
34481 {
34482  return __arm_vcvtq_m_n_s16_f16 (__inactive, __a, __imm6, __p);
34483 }
34484 
34485 __extension__ extern __inline uint32x4_t
34486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34487 __arm_vcvtq_m_n (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
34488 {
34489  return __arm_vcvtq_m_n_u32_f32 (__inactive, __a, __imm6, __p);
34490 }
34491 
34492 __extension__ extern __inline uint16x8_t
34493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34494 __arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
34495 {
34496  return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
34497 }
34498 
34499 __extension__ extern __inline float32x4_t
34500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34501 __arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34502 {
34503  return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
34504 }
34505 
34506 __extension__ extern __inline float16x8_t
34507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34508 __arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34509 {
34510  return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
34511 }
34512 
34513 __extension__ extern __inline float32x4_t
34514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34515 __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34516 {
34517  return __arm_vfmaq_m_f32 (__a, __b, __c, __p);
34518 }
34519 
34520 __extension__ extern __inline float16x8_t
34521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34522 __arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34523 {
34524  return __arm_vfmaq_m_f16 (__a, __b, __c, __p);
34525 }
34526 
34527 __extension__ extern __inline float32x4_t
34528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34529 __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
34530 {
34531  return __arm_vfmaq_m_n_f32 (__a, __b, __c, __p);
34532 }
34533 
34534 __extension__ extern __inline float16x8_t
34535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34536 __arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
34537 {
34538  return __arm_vfmaq_m_n_f16 (__a, __b, __c, __p);
34539 }
34540 
34541 __extension__ extern __inline float32x4_t
34542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34543 __arm_vfmasq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
34544 {
34545  return __arm_vfmasq_m_n_f32 (__a, __b, __c, __p);
34546 }
34547 
34548 __extension__ extern __inline float16x8_t
34549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34550 __arm_vfmasq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
34551 {
34552  return __arm_vfmasq_m_n_f16 (__a, __b, __c, __p);
34553 }
34554 
34555 __extension__ extern __inline float32x4_t
34556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34557 __arm_vfmsq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34558 {
34559  return __arm_vfmsq_m_f32 (__a, __b, __c, __p);
34560 }
34561 
34562 __extension__ extern __inline float16x8_t
34563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34564 __arm_vfmsq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34565 {
34566  return __arm_vfmsq_m_f16 (__a, __b, __c, __p);
34567 }
34568 
34569 __extension__ extern __inline float32x4_t
34570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34571 __arm_vmaxnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34572 {
34573  return __arm_vmaxnmq_m_f32 (__inactive, __a, __b, __p);
34574 }
34575 
34576 __extension__ extern __inline float16x8_t
34577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34578 __arm_vmaxnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34579 {
34580  return __arm_vmaxnmq_m_f16 (__inactive, __a, __b, __p);
34581 }
34582 
34583 __extension__ extern __inline float32x4_t
34584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34585 __arm_vminnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34586 {
34587  return __arm_vminnmq_m_f32 (__inactive, __a, __b, __p);
34588 }
34589 
34590 __extension__ extern __inline float16x8_t
34591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34592 __arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34593 {
34594  return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p);
34595 }
34596 
34597 __extension__ extern __inline float32x4_t
34598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34599 __arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34600 {
34601  return __arm_vmulq_m_f32 (__inactive, __a, __b, __p);
34602 }
34603 
34604 __extension__ extern __inline float16x8_t
34605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34606 __arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34607 {
34608  return __arm_vmulq_m_f16 (__inactive, __a, __b, __p);
34609 }
34610 
34611 __extension__ extern __inline float32x4_t
34612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34613 __arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34614 {
34615  return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p);
34616 }
34617 
34618 __extension__ extern __inline float16x8_t
34619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34620 __arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34621 {
34622  return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p);
34623 }
34624 
34625 __extension__ extern __inline float32x4_t
34626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34627 __arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34628 {
34629  return __arm_vornq_m_f32 (__inactive, __a, __b, __p);
34630 }
34631 
34632 __extension__ extern __inline float16x8_t
34633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34634 __arm_vornq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34635 {
34636  return __arm_vornq_m_f16 (__inactive, __a, __b, __p);
34637 }
34638 
34639 __extension__ extern __inline float32x4_t
34640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34641 __arm_vorrq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34642 {
34643  return __arm_vorrq_m_f32 (__inactive, __a, __b, __p);
34644 }
34645 
34646 __extension__ extern __inline float16x8_t
34647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34648 __arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34649 {
34650  return __arm_vorrq_m_f16 (__inactive, __a, __b, __p);
34651 }
34652 
34653 __extension__ extern __inline float32x4_t
34654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34655 __arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34656 {
34657  return __arm_vsubq_m_f32 (__inactive, __a, __b, __p);
34658 }
34659 
34660 __extension__ extern __inline float16x8_t
34661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34662 __arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34663 {
34664  return __arm_vsubq_m_f16 (__inactive, __a, __b, __p);
34665 }
34666 
34667 __extension__ extern __inline float32x4_t
34668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34669 __arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34670 {
34671  return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p);
34672 }
34673 
34674 __extension__ extern __inline float16x8_t
34675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34676 __arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34677 {
34678  return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p);
34679 }
34680 
34681 __extension__ extern __inline float32x4_t
34682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34683 __arm_vld1q (float32_t const * __base)
34684 {
34685  return __arm_vld1q_f32 (__base);
34686 }
34687 
34688 __extension__ extern __inline float16x8_t
34689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34690 __arm_vld1q (float16_t const * __base)
34691 {
34692  return __arm_vld1q_f16 (__base);
34693 }
34694 
34695 __extension__ extern __inline float16x8_t
34696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34697 __arm_vldrhq_gather_offset (float16_t const * __base, uint16x8_t __offset)
34698 {
34699  return __arm_vldrhq_gather_offset_f16 (__base, __offset);
34700 }
34701 
34702 __extension__ extern __inline float16x8_t
34703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34704 __arm_vldrhq_gather_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
34705 {
34706  return __arm_vldrhq_gather_offset_z_f16 (__base, __offset, __p);
34707 }
34708 
34709 __extension__ extern __inline float16x8_t
34710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34711 __arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset)
34712 {
34713  return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset);
34714 }
34715 
34716 __extension__ extern __inline float16x8_t
34717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34718 __arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
34719 {
34720  return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p);
34721 }
34722 
34723 __extension__ extern __inline float32x4_t
34724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34725 __arm_vldrwq_gather_offset (float32_t const * __base, uint32x4_t __offset)
34726 {
34727  return __arm_vldrwq_gather_offset_f32 (__base, __offset);
34728 }
34729 
34730 __extension__ extern __inline float32x4_t
34731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34732 __arm_vldrwq_gather_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
34733 {
34734  return __arm_vldrwq_gather_offset_z_f32 (__base, __offset, __p);
34735 }
34736 
34737 __extension__ extern __inline float32x4_t
34738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34739 __arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset)
34740 {
34741  return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset);
34742 }
34743 
34744 __extension__ extern __inline float32x4_t
34745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34746 __arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
34747 {
34748  return __arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p);
34749 }
34750 
34751 __extension__ extern __inline void
34752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34753 __arm_vstrwq_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
34754 {
34755  __arm_vstrwq_p_f32 (__addr, __value, __p);
34756 }
34757 
34758 __extension__ extern __inline void
34759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34760 __arm_vstrwq (float32_t * __addr, float32x4_t __value)
34761 {
34762  __arm_vstrwq_f32 (__addr, __value);
34763 }
34764 
34765 __extension__ extern __inline void
34766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34767 __arm_vst1q (float32_t * __addr, float32x4_t __value)
34768 {
34769  __arm_vst1q_f32 (__addr, __value);
34770 }
34771 
34772 __extension__ extern __inline void
34773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34774 __arm_vst1q (float16_t * __addr, float16x8_t __value)
34775 {
34776  __arm_vst1q_f16 (__addr, __value);
34777 }
34778 
34779 __extension__ extern __inline void
34780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34781 __arm_vstrhq (float16_t * __addr, float16x8_t __value)
34782 {
34783  __arm_vstrhq_f16 (__addr, __value);
34784 }
34785 
34786 __extension__ extern __inline void
34787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34788 __arm_vstrhq_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
34789 {
34790  __arm_vstrhq_p_f16 (__addr, __value, __p);
34791 }
34792 
34793 __extension__ extern __inline void
34794 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34795 __arm_vstrhq_scatter_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
34796 {
34797  __arm_vstrhq_scatter_offset_f16 (__base, __offset, __value);
34798 }
34799 
34800 __extension__ extern __inline void
34801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34802 __arm_vstrhq_scatter_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
34803 {
34804  __arm_vstrhq_scatter_offset_p_f16 (__base, __offset, __value, __p);
34805 }
34806 
34807 __extension__ extern __inline void
34808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34809 __arm_vstrhq_scatter_shifted_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
34810 {
34811  __arm_vstrhq_scatter_shifted_offset_f16 (__base, __offset, __value);
34812 }
34813 
34814 __extension__ extern __inline void
34815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34816 __arm_vstrhq_scatter_shifted_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
34817 {
34818  __arm_vstrhq_scatter_shifted_offset_p_f16 (__base, __offset, __value, __p);
34819 }
34820 
34821 __extension__ extern __inline void
34822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34823 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, float32x4_t __value)
34824 {
34825  __arm_vstrwq_scatter_base_f32 (__addr, __offset, __value);
34826 }
34827 
34828 __extension__ extern __inline void
34829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34830 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
34831 {
34832  __arm_vstrwq_scatter_base_p_f32 (__addr, __offset, __value, __p);
34833 }
34834 
34835 __extension__ extern __inline void
34836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34837 __arm_vstrwq_scatter_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
34838 {
34839  __arm_vstrwq_scatter_offset_f32 (__base, __offset, __value);
34840 }
34841 
34842 __extension__ extern __inline void
34843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34844 __arm_vstrwq_scatter_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
34845 {
34846  __arm_vstrwq_scatter_offset_p_f32 (__base, __offset, __value, __p);
34847 }
34848 
34849 __extension__ extern __inline void
34850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34851 __arm_vstrwq_scatter_shifted_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
34852 {
34853  __arm_vstrwq_scatter_shifted_offset_f32 (__base, __offset, __value);
34854 }
34855 
34856 __extension__ extern __inline void
34857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34858 __arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
34859 {
34860  __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p);
34861 }
34862 
34863 __extension__ extern __inline float16x8_t
34864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34865 __arm_vaddq (float16x8_t __a, float16x8_t __b)
34866 {
34867  return __arm_vaddq_f16 (__a, __b);
34868 }
34869 
34870 __extension__ extern __inline float32x4_t
34871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34872 __arm_vaddq (float32x4_t __a, float32x4_t __b)
34873 {
34874  return __arm_vaddq_f32 (__a, __b);
34875 }
34876 
34877 __extension__ extern __inline void
34878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34879 __arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value)
34880 {
34881  __arm_vstrwq_scatter_base_wb_f32 (__addr, __offset, __value);
34882 }
34883 
34884 __extension__ extern __inline void
34885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34886 __arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
34887 {
34888  __arm_vstrwq_scatter_base_wb_p_f32 (__addr, __offset, __value, __p);
34889 }
34890 
34891 __extension__ extern __inline float16x8_t
34892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34893 __arm_vminnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34894 {
34895  return __arm_vminnmq_x_f16 (__a, __b, __p);
34896 }
34897 
34898 __extension__ extern __inline float32x4_t
34899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34900 __arm_vminnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34901 {
34902  return __arm_vminnmq_x_f32 (__a, __b, __p);
34903 }
34904 
34905 __extension__ extern __inline float16x8_t
34906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34907 __arm_vmaxnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34908 {
34909  return __arm_vmaxnmq_x_f16 (__a, __b, __p);
34910 }
34911 
34912 __extension__ extern __inline float32x4_t
34913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34914 __arm_vmaxnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34915 {
34916  return __arm_vmaxnmq_x_f32 (__a, __b, __p);
34917 }
34918 
34919 __extension__ extern __inline float16x8_t
34920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34921 __arm_vabdq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34922 {
34923  return __arm_vabdq_x_f16 (__a, __b, __p);
34924 }
34925 
34926 __extension__ extern __inline float32x4_t
34927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34928 __arm_vabdq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34929 {
34930  return __arm_vabdq_x_f32 (__a, __b, __p);
34931 }
34932 
34933 __extension__ extern __inline float16x8_t
34934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34935 __arm_vabsq_x (float16x8_t __a, mve_pred16_t __p)
34936 {
34937  return __arm_vabsq_x_f16 (__a, __p);
34938 }
34939 
34940 __extension__ extern __inline float32x4_t
34941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34942 __arm_vabsq_x (float32x4_t __a, mve_pred16_t __p)
34943 {
34944  return __arm_vabsq_x_f32 (__a, __p);
34945 }
34946 
34947 __extension__ extern __inline float16x8_t
34948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34949 __arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34950 {
34951  return __arm_vaddq_x_f16 (__a, __b, __p);
34952 }
34953 
34954 __extension__ extern __inline float32x4_t
34955 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34956 __arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34957 {
34958  return __arm_vaddq_x_f32 (__a, __b, __p);
34959 }
34960 
34961 __extension__ extern __inline float16x8_t
34962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34963 __arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
34964 {
34965  return __arm_vaddq_x_n_f16 (__a, __b, __p);
34966 }
34967 
34968 __extension__ extern __inline float32x4_t
34969 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34970 __arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34971 {
34972  return __arm_vaddq_x_n_f32 (__a, __b, __p);
34973 }
34974 
34975 __extension__ extern __inline float16x8_t
34976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34977 __arm_vnegq_x (float16x8_t __a, mve_pred16_t __p)
34978 {
34979  return __arm_vnegq_x_f16 (__a, __p);
34980 }
34981 
34982 __extension__ extern __inline float32x4_t
34983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34984 __arm_vnegq_x (float32x4_t __a, mve_pred16_t __p)
34985 {
34986  return __arm_vnegq_x_f32 (__a, __p);
34987 }
34988 
34989 __extension__ extern __inline float16x8_t
34990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34991 __arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34992 {
34993  return __arm_vmulq_x_f16 (__a, __b, __p);
34994 }
34995 
34996 __extension__ extern __inline float32x4_t
34997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34998 __arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34999 {
35000  return __arm_vmulq_x_f32 (__a, __b, __p);
35001 }
35002 
35003 __extension__ extern __inline float16x8_t
35004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35005 __arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
35006 {
35007  return __arm_vmulq_x_n_f16 (__a, __b, __p);
35008 }
35009 
35010 __extension__ extern __inline float32x4_t
35011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35012 __arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
35013 {
35014  return __arm_vmulq_x_n_f32 (__a, __b, __p);
35015 }
35016 
35017 __extension__ extern __inline float16x8_t
35018 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35019 __arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35020 {
35021  return __arm_vsubq_x_f16 (__a, __b, __p);
35022 }
35023 
35024 __extension__ extern __inline float32x4_t
35025 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35026 __arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35027 {
35028  return __arm_vsubq_x_f32 (__a, __b, __p);
35029 }
35030 
35031 __extension__ extern __inline float16x8_t
35032 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35033 __arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
35034 {
35035  return __arm_vsubq_x_n_f16 (__a, __b, __p);
35036 }
35037 
35038 __extension__ extern __inline float32x4_t
35039 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35040 __arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
35041 {
35042  return __arm_vsubq_x_n_f32 (__a, __b, __p);
35043 }
35044 
35045 __extension__ extern __inline float16x8_t
35046 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35047 __arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35048 {
35049  return __arm_vcaddq_rot90_x_f16 (__a, __b, __p);
35050 }
35051 
35052 __extension__ extern __inline float32x4_t
35053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35054 __arm_vcaddq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35055 {
35056  return __arm_vcaddq_rot90_x_f32 (__a, __b, __p);
35057 }
35058 
35059 __extension__ extern __inline float16x8_t
35060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35061 __arm_vcaddq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35062 {
35063  return __arm_vcaddq_rot270_x_f16 (__a, __b, __p);
35064 }
35065 
35066 __extension__ extern __inline float32x4_t
35067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35068 __arm_vcaddq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35069 {
35070  return __arm_vcaddq_rot270_x_f32 (__a, __b, __p);
35071 }
35072 
35073 __extension__ extern __inline float16x8_t
35074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35075 __arm_vcmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35076 {
35077  return __arm_vcmulq_x_f16 (__a, __b, __p);
35078 }
35079 
35080 __extension__ extern __inline float32x4_t
35081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35082 __arm_vcmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35083 {
35084  return __arm_vcmulq_x_f32 (__a, __b, __p);
35085 }
35086 
35087 __extension__ extern __inline float16x8_t
35088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35089 __arm_vcmulq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35090 {
35091  return __arm_vcmulq_rot90_x_f16 (__a, __b, __p);
35092 }
35093 
35094 __extension__ extern __inline float32x4_t
35095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35096 __arm_vcmulq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35097 {
35098  return __arm_vcmulq_rot90_x_f32 (__a, __b, __p);
35099 }
35100 
35101 __extension__ extern __inline float16x8_t
35102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35103 __arm_vcmulq_rot180_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35104 {
35105  return __arm_vcmulq_rot180_x_f16 (__a, __b, __p);
35106 }
35107 
35108 __extension__ extern __inline float32x4_t
35109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35110 __arm_vcmulq_rot180_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35111 {
35112  return __arm_vcmulq_rot180_x_f32 (__a, __b, __p);
35113 }
35114 
35115 __extension__ extern __inline float16x8_t
35116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35117 __arm_vcmulq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35118 {
35119  return __arm_vcmulq_rot270_x_f16 (__a, __b, __p);
35120 }
35121 
35122 __extension__ extern __inline float32x4_t
35123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35124 __arm_vcmulq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35125 {
35126  return __arm_vcmulq_rot270_x_f32 (__a, __b, __p);
35127 }
35128 
35129 __extension__ extern __inline float16x8_t
35130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35131 __arm_vcvtq_x (uint16x8_t __a, mve_pred16_t __p)
35132 {
35133  return __arm_vcvtq_x_f16_u16 (__a, __p);
35134 }
35135 
35136 __extension__ extern __inline float16x8_t
35137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35138 __arm_vcvtq_x (int16x8_t __a, mve_pred16_t __p)
35139 {
35140  return __arm_vcvtq_x_f16_s16 (__a, __p);
35141 }
35142 
35143 __extension__ extern __inline float32x4_t
35144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35145 __arm_vcvtq_x (int32x4_t __a, mve_pred16_t __p)
35146 {
35147  return __arm_vcvtq_x_f32_s32 (__a, __p);
35148 }
35149 
35150 __extension__ extern __inline float32x4_t
35151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35152 __arm_vcvtq_x (uint32x4_t __a, mve_pred16_t __p)
35153 {
35154  return __arm_vcvtq_x_f32_u32 (__a, __p);
35155 }
35156 
35157 __extension__ extern __inline float16x8_t
35158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35159 __arm_vcvtq_x_n (int16x8_t __a, const int __imm6, mve_pred16_t __p)
35160 {
35161  return __arm_vcvtq_x_n_f16_s16 (__a, __imm6, __p);
35162 }
35163 
35164 __extension__ extern __inline float16x8_t
35165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35166 __arm_vcvtq_x_n (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
35167 {
35168  return __arm_vcvtq_x_n_f16_u16 (__a, __imm6, __p);
35169 }
35170 
35171 __extension__ extern __inline float32x4_t
35172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35173 __arm_vcvtq_x_n (int32x4_t __a, const int __imm6, mve_pred16_t __p)
35174 {
35175  return __arm_vcvtq_x_n_f32_s32 (__a, __imm6, __p);
35176 }
35177 
35178 __extension__ extern __inline float32x4_t
35179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35180 __arm_vcvtq_x_n (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
35181 {
35182  return __arm_vcvtq_x_n_f32_u32 (__a, __imm6, __p);
35183 }
35184 
35185 __extension__ extern __inline float16x8_t
35186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35187 __arm_vrndq_x (float16x8_t __a, mve_pred16_t __p)
35188 {
35189  return __arm_vrndq_x_f16 (__a, __p);
35190 }
35191 
35192 __extension__ extern __inline float32x4_t
35193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35194 __arm_vrndq_x (float32x4_t __a, mve_pred16_t __p)
35195 {
35196  return __arm_vrndq_x_f32 (__a, __p);
35197 }
35198 
35199 __extension__ extern __inline float16x8_t
35200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35201 __arm_vrndnq_x (float16x8_t __a, mve_pred16_t __p)
35202 {
35203  return __arm_vrndnq_x_f16 (__a, __p);
35204 }
35205 
35206 __extension__ extern __inline float32x4_t
35207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35208 __arm_vrndnq_x (float32x4_t __a, mve_pred16_t __p)
35209 {
35210  return __arm_vrndnq_x_f32 (__a, __p);
35211 }
35212 
35213 __extension__ extern __inline float16x8_t
35214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35215 __arm_vrndmq_x (float16x8_t __a, mve_pred16_t __p)
35216 {
35217  return __arm_vrndmq_x_f16 (__a, __p);
35218 }
35219 
35220 __extension__ extern __inline float32x4_t
35221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35222 __arm_vrndmq_x (float32x4_t __a, mve_pred16_t __p)
35223 {
35224  return __arm_vrndmq_x_f32 (__a, __p);
35225 }
35226 
35227 __extension__ extern __inline float16x8_t
35228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35229 __arm_vrndpq_x (float16x8_t __a, mve_pred16_t __p)
35230 {
35231  return __arm_vrndpq_x_f16 (__a, __p);
35232 }
35233 
35234 __extension__ extern __inline float32x4_t
35235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35236 __arm_vrndpq_x (float32x4_t __a, mve_pred16_t __p)
35237 {
35238  return __arm_vrndpq_x_f32 (__a, __p);
35239 }
35240 
35241 __extension__ extern __inline float16x8_t
35242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35243 __arm_vrndaq_x (float16x8_t __a, mve_pred16_t __p)
35244 {
35245  return __arm_vrndaq_x_f16 (__a, __p);
35246 }
35247 
35248 __extension__ extern __inline float32x4_t
35249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35250 __arm_vrndaq_x (float32x4_t __a, mve_pred16_t __p)
35251 {
35252  return __arm_vrndaq_x_f32 (__a, __p);
35253 }
35254 
35255 __extension__ extern __inline float16x8_t
35256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35257 __arm_vrndxq_x (float16x8_t __a, mve_pred16_t __p)
35258 {
35259  return __arm_vrndxq_x_f16 (__a, __p);
35260 }
35261 
35262 __extension__ extern __inline float32x4_t
35263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35264 __arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
35265 {
35266  return __arm_vrndxq_x_f32 (__a, __p);
35267 }
35268 
35269 __extension__ extern __inline float16x8_t
35270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35271 __arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35272 {
35273  return __arm_vandq_x_f16 (__a, __b, __p);
35274 }
35275 
35276 __extension__ extern __inline float32x4_t
35277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35278 __arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35279 {
35280  return __arm_vandq_x_f32 (__a, __b, __p);
35281 }
35282 
35283 __extension__ extern __inline float16x8_t
35284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35285 __arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35286 {
35287  return __arm_vbicq_x_f16 (__a, __b, __p);
35288 }
35289 
35290 __extension__ extern __inline float32x4_t
35291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35292 __arm_vbicq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35293 {
35294  return __arm_vbicq_x_f32 (__a, __b, __p);
35295 }
35296 
35297 __extension__ extern __inline float16x8_t
35298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35299 __arm_vbrsrq_x (float16x8_t __a, int32_t __b, mve_pred16_t __p)
35300 {
35301  return __arm_vbrsrq_x_n_f16 (__a, __b, __p);
35302 }
35303 
35304 __extension__ extern __inline float32x4_t
35305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35306 __arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
35307 {
35308  return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
35309 }
35310 
35311 __extension__ extern __inline float16x8_t
35312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35313 __arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35314 {
35315  return __arm_veorq_x_f16 (__a, __b, __p);
35316 }
35317 
35318 __extension__ extern __inline float32x4_t
35319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35320 __arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35321 {
35322  return __arm_veorq_x_f32 (__a, __b, __p);
35323 }
35324 
35325 __extension__ extern __inline float16x8_t
35326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35327 __arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35328 {
35329  return __arm_vornq_x_f16 (__a, __b, __p);
35330 }
35331 
35332 __extension__ extern __inline float32x4_t
35333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35334 __arm_vornq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35335 {
35336  return __arm_vornq_x_f32 (__a, __b, __p);
35337 }
35338 
35339 __extension__ extern __inline float16x8_t
35340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35341 __arm_vorrq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35342 {
35343  return __arm_vorrq_x_f16 (__a, __b, __p);
35344 }
35345 
35346 __extension__ extern __inline float32x4_t
35347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35348 __arm_vorrq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35349 {
35350  return __arm_vorrq_x_f32 (__a, __b, __p);
35351 }
35352 
35353 __extension__ extern __inline float16x8_t
35354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35355 __arm_vrev32q_x (float16x8_t __a, mve_pred16_t __p)
35356 {
35357  return __arm_vrev32q_x_f16 (__a, __p);
35358 }
35359 
35360 __extension__ extern __inline float16x8_t
35361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35362 __arm_vrev64q_x (float16x8_t __a, mve_pred16_t __p)
35363 {
35364  return __arm_vrev64q_x_f16 (__a, __p);
35365 }
35366 
35367 __extension__ extern __inline float32x4_t
35368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35369 __arm_vrev64q_x (float32x4_t __a, mve_pred16_t __p)
35370 {
35371  return __arm_vrev64q_x_f32 (__a, __p);
35372 }
35373 
35374 __extension__ extern __inline float16x8x4_t
35375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35376 __arm_vld4q (float16_t const * __addr)
35377 {
35378  return __arm_vld4q_f16 (__addr);
35379 }
35380 
35381 __extension__ extern __inline float16x8x2_t
35382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35383 __arm_vld2q (float16_t const * __addr)
35384 {
35385  return __arm_vld2q_f16 (__addr);
35386 }
35387 
35388 __extension__ extern __inline float16x8_t
35389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35390 __arm_vld1q_z (float16_t const *__base, mve_pred16_t __p)
35391 {
35392  return __arm_vld1q_z_f16 (__base, __p);
35393 }
35394 
35395 __extension__ extern __inline void
35396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35397 __arm_vst2q (float16_t * __addr, float16x8x2_t __value)
35398 {
35399  __arm_vst2q_f16 (__addr, __value);
35400 }
35401 
35402 __extension__ extern __inline void
35403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35404 __arm_vst1q_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
35405 {
35406  __arm_vst1q_p_f16 (__addr, __value, __p);
35407 }
35408 
35409 __extension__ extern __inline float32x4x4_t
35410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35411 __arm_vld4q (float32_t const * __addr)
35412 {
35413  return __arm_vld4q_f32 (__addr);
35414 }
35415 
35416 __extension__ extern __inline float32x4x2_t
35417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35418 __arm_vld2q (float32_t const * __addr)
35419 {
35420  return __arm_vld2q_f32 (__addr);
35421 }
35422 
35423 __extension__ extern __inline float32x4_t
35424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35425 __arm_vld1q_z (float32_t const *__base, mve_pred16_t __p)
35426 {
35427  return __arm_vld1q_z_f32 (__base, __p);
35428 }
35429 
35430 __extension__ extern __inline void
35431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35432 __arm_vst2q (float32_t * __addr, float32x4x2_t __value)
35433 {
35434  __arm_vst2q_f32 (__addr, __value);
35435 }
35436 
35437 __extension__ extern __inline void
35438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35439 __arm_vst1q_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
35440 {
35441  __arm_vst1q_p_f32 (__addr, __value, __p);
35442 }
35443 
35444 __extension__ extern __inline float16x8_t
35445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35446 __arm_vsetq_lane (float16_t __a, float16x8_t __b, const int __idx)
35447 {
35448  return __arm_vsetq_lane_f16 (__a, __b, __idx);
35449 }
35450 
35451 __extension__ extern __inline float32x4_t
35452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35453 __arm_vsetq_lane (float32_t __a, float32x4_t __b, const int __idx)
35454 {
35455  return __arm_vsetq_lane_f32 (__a, __b, __idx);
35456 }
35457 
35458 __extension__ extern __inline float16_t
35459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35460 __arm_vgetq_lane (float16x8_t __a, const int __idx)
35461 {
35462  return __arm_vgetq_lane_f16 (__a, __idx);
35463 }
35464 
35465 __extension__ extern __inline float32_t
35466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35467 __arm_vgetq_lane (float32x4_t __a, const int __idx)
35468 {
35469  return __arm_vgetq_lane_f32 (__a, __idx);
35470 }
35471 #endif /* MVE Floating point.  */
35472 
35473 #else
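/* Descriptive note (added): the identifiers below classify every argument
   type accepted by the polymorphic intrinsic macros further down; an
   expression's identifier is obtained via __ARM_mve_typeid and used to
   select the matching type-suffixed intrinsic.  */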
35474 enum {
35475     __ARM_mve_type_fp_n = 1,
35476     __ARM_mve_type_int_n,
35477     __ARM_mve_type_float16_t_ptr,
35478     __ARM_mve_type_float16x8_t,
35479     __ARM_mve_type_float16x8x2_t,
35480     __ARM_mve_type_float16x8x4_t,
35481     __ARM_mve_type_float32_t_ptr,
35482     __ARM_mve_type_float32x4_t,
35483     __ARM_mve_type_float32x4x2_t,
35484     __ARM_mve_type_float32x4x4_t,
35485     __ARM_mve_type_int16_t_ptr,
35486     __ARM_mve_type_int16x8_t,
35487     __ARM_mve_type_int16x8x2_t,
35488     __ARM_mve_type_int16x8x4_t,
35489     __ARM_mve_type_int32_t_ptr,
35490     __ARM_mve_type_int32x4_t,
35491     __ARM_mve_type_int32x4x2_t,
35492     __ARM_mve_type_int32x4x4_t,
35493     __ARM_mve_type_int64_t_ptr,
35494     __ARM_mve_type_int64x2_t,
35495     __ARM_mve_type_int8_t_ptr,
35496     __ARM_mve_type_int8x16_t,
35497     __ARM_mve_type_int8x16x2_t,
35498     __ARM_mve_type_int8x16x4_t,
35499     __ARM_mve_type_uint16_t_ptr,
35500     __ARM_mve_type_uint16x8_t,
35501     __ARM_mve_type_uint16x8x2_t,
35502     __ARM_mve_type_uint16x8x4_t,
35503     __ARM_mve_type_uint32_t_ptr,
35504     __ARM_mve_type_uint32x4_t,
35505     __ARM_mve_type_uint32x4x2_t,
35506     __ARM_mve_type_uint32x4x4_t,
35507     __ARM_mve_type_uint64_t_ptr,
35508     __ARM_mve_type_uint64x2_t,
35509     __ARM_mve_type_uint8_t_ptr,
35510     __ARM_mve_type_uint8x16_t,
35511     __ARM_mve_type_uint8x16x2_t,
35512     __ARM_mve_type_uint8x16x4_t,
35513     __ARM_mve_unsupported_type
35514 };
35515 
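/* Descriptive note (added): __ARM_mve_typeid(x) maps the type of x to one
   of the identifiers above using _Generic.  Two variants follow: with MVE
   floating point enabled it also recognises float16/float32 scalar, vector
   and pointer types; otherwise only the integer types are handled.  */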
35516 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
35517 #define __ARM_mve_typeid(x) _Generic(x, \
35518     float16_t: __ARM_mve_type_fp_n, \
35519     float16_t *: __ARM_mve_type_float16_t_ptr, \
35520     float16_t const *: __ARM_mve_type_float16_t_ptr, \
35521     float16x8_t: __ARM_mve_type_float16x8_t, \
35522     float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
35523     float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
35524     float32_t: __ARM_mve_type_fp_n, \
35525     float32_t *: __ARM_mve_type_float32_t_ptr, \
35526     float32_t const *: __ARM_mve_type_float32_t_ptr, \
35527     float32x4_t: __ARM_mve_type_float32x4_t, \
35528     float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
35529     float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
35530     int16_t: __ARM_mve_type_int_n, \
35531     int16_t *: __ARM_mve_type_int16_t_ptr, \
35532     int16_t const *: __ARM_mve_type_int16_t_ptr, \
35533     int16x8_t: __ARM_mve_type_int16x8_t, \
35534     int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
35535     int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
35536     int32_t: __ARM_mve_type_int_n, \
35537     int32_t *: __ARM_mve_type_int32_t_ptr, \
35538     int32_t const *: __ARM_mve_type_int32_t_ptr, \
35539     int32x4_t: __ARM_mve_type_int32x4_t, \
35540     int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
35541     int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
35542     int64_t: __ARM_mve_type_int_n, \
35543     int64_t *: __ARM_mve_type_int64_t_ptr, \
35544     int64_t const *: __ARM_mve_type_int64_t_ptr, \
35545     int64x2_t: __ARM_mve_type_int64x2_t, \
35546     int8_t: __ARM_mve_type_int_n, \
35547     int8_t *: __ARM_mve_type_int8_t_ptr, \
35548     int8_t const *: __ARM_mve_type_int8_t_ptr, \
35549     int8x16_t: __ARM_mve_type_int8x16_t, \
35550     int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
35551     int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
35552     uint16_t: __ARM_mve_type_int_n, \
35553     uint16_t *: __ARM_mve_type_uint16_t_ptr, \
35554     uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
35555     uint16x8_t: __ARM_mve_type_uint16x8_t, \
35556     uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
35557     uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
35558     uint32_t: __ARM_mve_type_int_n, \
35559     uint32_t *: __ARM_mve_type_uint32_t_ptr, \
35560     uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
35561     uint32x4_t: __ARM_mve_type_uint32x4_t, \
35562     uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
35563     uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
35564     uint64_t: __ARM_mve_type_int_n, \
35565     uint64_t *: __ARM_mve_type_uint64_t_ptr, \
35566     uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
35567     uint64x2_t: __ARM_mve_type_uint64x2_t, \
35568     uint8_t: __ARM_mve_type_int_n, \
35569     uint8_t *: __ARM_mve_type_uint8_t_ptr, \
35570     uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
35571     uint8x16_t: __ARM_mve_type_uint8x16_t, \
35572     uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
35573     uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
35574     default: _Generic(x, \
35575 	signed char: __ARM_mve_type_int_n, \
35576 	short: __ARM_mve_type_int_n, \
35577 	int: __ARM_mve_type_int_n, \
35578 	long: __ARM_mve_type_int_n, \
35579 	double: __ARM_mve_type_fp_n, \
35580 	long long: __ARM_mve_type_int_n, \
35581 	unsigned char: __ARM_mve_type_int_n, \
35582 	unsigned short: __ARM_mve_type_int_n, \
35583 	unsigned int: __ARM_mve_type_int_n, \
35584 	unsigned long: __ARM_mve_type_int_n, \
35585 	unsigned long long: __ARM_mve_type_int_n, \
35586 	default: __ARM_mve_unsupported_type))
35587 #else
35588 #define __ARM_mve_typeid(x) _Generic(x, \
35589     int16_t: __ARM_mve_type_int_n, \
35590     int16_t *: __ARM_mve_type_int16_t_ptr, \
35591     int16_t const *: __ARM_mve_type_int16_t_ptr, \
35592     int16x8_t: __ARM_mve_type_int16x8_t, \
35593     int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
35594     int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
35595     int32_t: __ARM_mve_type_int_n, \
35596     int32_t *: __ARM_mve_type_int32_t_ptr, \
35597     int32_t const *: __ARM_mve_type_int32_t_ptr, \
35598     int32x4_t: __ARM_mve_type_int32x4_t, \
35599     int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
35600     int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
35601     int64_t: __ARM_mve_type_int_n, \
35602     int64_t *: __ARM_mve_type_int64_t_ptr, \
35603     int64_t const *: __ARM_mve_type_int64_t_ptr, \
35604     int64x2_t: __ARM_mve_type_int64x2_t, \
35605     int8_t: __ARM_mve_type_int_n, \
35606     int8_t *: __ARM_mve_type_int8_t_ptr, \
35607     int8_t const *: __ARM_mve_type_int8_t_ptr, \
35608     int8x16_t: __ARM_mve_type_int8x16_t, \
35609     int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
35610     int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
35611     uint16_t: __ARM_mve_type_int_n, \
35612     uint16_t *: __ARM_mve_type_uint16_t_ptr, \
35613     uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
35614     uint16x8_t: __ARM_mve_type_uint16x8_t, \
35615     uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
35616     uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
35617     uint32_t: __ARM_mve_type_int_n, \
35618     uint32_t *: __ARM_mve_type_uint32_t_ptr, \
35619     uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
35620     uint32x4_t: __ARM_mve_type_uint32x4_t, \
35621     uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
35622     uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
35623     uint64_t: __ARM_mve_type_int_n, \
35624     uint64_t *: __ARM_mve_type_uint64_t_ptr, \
35625     uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
35626     uint64x2_t: __ARM_mve_type_uint64x2_t, \
35627     uint8_t: __ARM_mve_type_int_n, \
35628     uint8_t *: __ARM_mve_type_uint8_t_ptr, \
35629     uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
35630     uint8x16_t: __ARM_mve_type_uint8x16_t, \
35631     uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
35632     uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
35633     default: _Generic(x, \
35634 	signed char: __ARM_mve_type_int_n, \
35635 	short: __ARM_mve_type_int_n, \
35636 	int: __ARM_mve_type_int_n, \
35637 	long: __ARM_mve_type_int_n, \
35638 	long long: __ARM_mve_type_int_n, \
35639 	unsigned char: __ARM_mve_type_int_n, \
35640 	unsigned short: __ARM_mve_type_int_n, \
35641 	unsigned int: __ARM_mve_type_int_n, \
35642 	unsigned long: __ARM_mve_type_int_n, \
35643 	unsigned long long: __ARM_mve_type_int_n, \
35644 	default: __ARM_mve_unsupported_type))
35645 #endif /* MVE Floating point.  */
35646 
35647 extern void *__ARM_undef;
35648 #define __ARM_mve_coerce(param, type) \
35649     _Generic(param, type: param, default: *(type *)__ARM_undef)
35650 #define __ARM_mve_coerce1(param, type) \
35651     _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
35652 #define __ARM_mve_coerce2(param, type) \
35653     _Generic(param, type: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef)
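
/* Editorial note (not part of the original header): the polymorphic wrappers
   below work by mapping each argument to an __ARM_mve_type_* enumerator with
   __ARM_mve_typeid, forming the pointer type
   int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)], and letting
   _Generic select the matching __arm_*_<suffix> overload.  __ARM_mve_coerce
   returns its argument unchanged when it already has the requested type and
   otherwise yields *(type *)__ARM_undef, which keeps the non-selected
   _Generic arms well typed without ever being evaluated.  Illustrative
   sketch: with float32x4_t operands, __arm_vaddq (a, b) resolves to
   __arm_vaddq_f32 (a, b).  */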
35654 
35655 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
35656 
35657 #define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35658   __typeof(p1) __p1 = (p1); \
35659   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35660   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
35661   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
35662   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
35663   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
35664   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
35665   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
35666   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
35667   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
35668 
35669 #define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
35670   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35671   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35672   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35673 
35674 #define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
35675   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35676   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35677   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35678 
35679 #define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
35680   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35681   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35682   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35683 
35684 #define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
35685   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35686   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35687   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35688 
35689 #define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
35690   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35691   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35692   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35693 
35694 #define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
35695   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35696   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35697   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35698 
35699 #define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
35700   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35701   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35702   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35703   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35704   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35705   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
35706   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
35707   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35708   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35709 
35710 #define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
35711   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35712   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35713   int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35714   int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35715   int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35716   int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35717 
35718 #define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
35719   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35720   int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35721   int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35722 
35723 #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
35724   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35725   int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35726   int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35727   int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35728   int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
35729   int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35730 
35731 #define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
35732   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35733   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35734   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35735   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35736   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
35737   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
35738 
35739 #define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
35740   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35741   int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
35742 
35743 #define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
35744   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35745   int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
35746 
35747 #define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
35748   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35749   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35750   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
35751 
35752 #define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
35753   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35754   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35755   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35756   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35757 
35758 #define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
35759   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35760   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35761   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35762   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35763 
35764 #define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
35765   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35766   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35767   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35768   int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35769   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35770   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
35771   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
35772 
35773 #define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
35774   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35775   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35776   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35777   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35778   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
35779 
35780 #define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
35781   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35782   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35783   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35784   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35785   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
35786 
35787 #define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
35788   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35789   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35790   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35791   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35792   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
35793   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
35794   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
35795 
35796 #define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
35797   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35798   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
35799   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35800   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35801 
35802 #define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
35803   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35804   int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
35805   int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
35806   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
35807   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
35808 
35809 #define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35810   __typeof(p1) __p1 = (p1); \
35811   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35812   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35813   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35814   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35815   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35816   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35817   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
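
/* Illustrative sketch (editorial, not part of the original header): for
   __arm_vshlq the shift-count argument is always a signed vector of the
   same element width, even when the data vector is unsigned, e.g.

       uint16x8_t v;
       int16x8_t sh;
       __arm_vshlq (v, sh);     resolves to __arm_vshlq_u16 (v, sh)  */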
35818 
35819 #define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35820   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35821   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
35822   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
35823   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
35824   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
35825   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
35826   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
35827 
35828 #define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35829   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
35830   int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
35831   int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
35832   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
35833   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
35834 
35835 #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35836   __typeof(p1) __p1 = (p1); \
35837   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35838   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35839   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35840   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35841   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35842   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35843   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35844   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35845   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35846 
35847 #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35848   __typeof(p1) __p1 = (p1); \
35849   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35850   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35851   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35852   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35853   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35854   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35855   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35856   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35857   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35858 
35859 #define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35860   __typeof(p1) __p1 = (p1); \
35861   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35862   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35863   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35864   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35865   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35866   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35867   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35868   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35869   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
35870   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
35871   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
35872   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
35873   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
35874   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
35875   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)), \
35876   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
35877   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
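
/* Illustrative sketch (editorial, not part of the original header):
   __arm_vaddq picks the vector-vector form when both arguments are vectors
   and the _n (vector-scalar) form when the second argument is a plain
   scalar, e.g.

       int32x4_t a, b;
       __arm_vaddq (a, b);      resolves to __arm_vaddq_s32 (a, b)
       __arm_vaddq (a, 5);      the scalar maps to __ARM_mve_type_int_n and
                                resolves to __arm_vaddq_n_s32 (a, 5)  */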
35878 
35879 #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35880   __typeof(p1) __p1 = (p1); \
35881   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35882   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35883   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35884   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35885   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35886   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35887   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35888   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35889   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35890 
35891 #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35892   __typeof(p1) __p1 = (p1); \
35893   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35894   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
35895   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
35896   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
35897   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
35898   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35899   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35900   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35901   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35902   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35903   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35904   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35905   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35906 
35907 #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35908   __typeof(p1) __p1 = (p1); \
35909   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35910   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35911   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35912   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35913   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35914   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35915   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35916   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35917   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35918 
35919 #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35920   __typeof(p1) __p1 = (p1); \
35921   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35922   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
35923   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
35924   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
35925   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
35926   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
35927   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
35928   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
35929   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
35930   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35931   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35932   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35933   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35934   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35935   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35936   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35937   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35938 
35939 #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35940   __typeof(p1) __p1 = (p1); \
35941   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35942   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35943   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35944   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35945   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35946   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35947   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35948   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35949   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35950 
35951 #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35952   __typeof(p1) __p1 = (p1); \
35953   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35954   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
35955   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
35956   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
35957   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
35958   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
35959   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
35960   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
35961   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
35962   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35963   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35964   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35965   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35966   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35967   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35968   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35969   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35970 
35971 #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
35972   __typeof(p1) __p1 = (p1); \
35973   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35974   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
35975   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
35976   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
35977   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
35978   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
35979   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
35980   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
35981   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35982 
35983 #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
35984   __typeof(p1) __p1 = (p1); \
35985   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
35986   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
35987   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
35988   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
35989   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
35990   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
35991   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
35992   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
35993   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
35994   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
35995   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
35996   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
35997   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
35998   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
35999   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36000   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
36001   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
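
/* Illustrative sketch (editorial, not part of the original header): the _m
   (predicated) comparison takes an mve_pred16_t predicate as its final
   argument and yields an mve_pred16_t result, e.g.

       uint8x16_t a, b;
       mve_pred16_t p, r;
       r = __arm_vcmpeqq_m (a, b, p);   resolves to __arm_vcmpeqq_m_u8  */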
36002 
36003 #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36004   __typeof(p1) __p1 = (p1); \
36005   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36006   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36007   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36008   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36009   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36010   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36011   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36012   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36013   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
36014   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36015   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36016 
36017 #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36018   __typeof(p1) __p1 = (p1); \
36019   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36020   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36021   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36022   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36023   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36024   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
36025   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36026   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36027   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36028   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36029   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36030 
36031 #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36032   __typeof(p1) __p1 = (p1); \
36033   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36034   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36035   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36036   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36037   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36038   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36039   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36040   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36041   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
36042   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36043   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36044 
36045 #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36046   __typeof(p1) __p1 = (p1); \
36047   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36048   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36049   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36050   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36051   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36052   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36053   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36054   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36055   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
36056   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36057   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36058   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36059   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36060   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36061   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
36062   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36063   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36064 
36065 #define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36066   __typeof(p1) __p1 = (p1); \
36067   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36068   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36069   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36070 
36071 #define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36072   __typeof(p1) __p1 = (p1); \
36073   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36074   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36075   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36076 
36077 #define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36078   __typeof(p1) __p1 = (p1); \
36079   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36080   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36081   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36082 
36083 #define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36084   __typeof(p1) __p1 = (p1); \
36085   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36086   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36087   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36088 
36089 #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36090   __typeof(p1) __p1 = (p1); \
36091   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36092   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36093   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36094   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36095   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36096   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36097   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
36098   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36099   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36100 
36101 #define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36102   __typeof(p1) __p1 = (p1); \
36103   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36104   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36105   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36106 
36107 #define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36108   __typeof(p1) __p1 = (p1); \
36109   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36110   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36111   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36112 
36113 #define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36114   __typeof(p1) __p1 = (p1); \
36115   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36116   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36117   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36118 
36119 #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36120   __typeof(p1) __p1 = (p1); \
36121   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36122   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36123   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36124 
36131 #define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36132   __typeof(p1) __p1 = (p1); \
36133   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36134   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36135   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36136 
36137 #define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36138   __typeof(p1) __p1 = (p1); \
36139   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36140   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36141   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36142 
36143 #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36144   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36145   int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36146   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36147   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36148   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36149   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36150   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
36151   int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
36152   int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
36153 
36154 #define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36155   __typeof(p1) __p1 = (p1); \
36156   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36157   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36158   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36159 
36160 #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36161   __typeof(p1) __p1 = (p1); \
36162   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36163   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36164   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
36165   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36166   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36167   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36168   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36169   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36170   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36171   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36172   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36173   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36174   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36175   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36176   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
36177   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36178   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
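
/* Illustrative usage (editorial sketch, not part of the original header):
   the selector above dispatches __arm_vsubq on its argument types, so the
   vsubq alias defined earlier in this header handles both vector-vector
   and vector-scalar subtraction with one spelling.  Assumes an MVE target
   (and MVE-FP for the float cases):

     int32x4_t a, b, d;
     d = vsubq (a, b);       // resolves to __arm_vsubq_s32
     d = vsubq (a, 5);       // resolves to __arm_vsubq_n_s32
     float16x8_t f, g;
     g = vsubq (f, 1.0);     // resolves to __arm_vsubq_n_f16
*/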
36179 
36180 #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36181   __typeof(p1) __p1 = (p1); \
36182   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36183   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36184   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36185 
36186 #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36187   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36188   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36189   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36190   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36191   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36192   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36193   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36194 
36195 #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36196   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36197   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36198   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36199   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36200   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36201   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36202   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36203 
36204 #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36205   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36206   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36207   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36208   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36209   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
36210 
36211 #define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36212   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36213   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36214   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36215   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36216   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
36217 
36218 #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36219   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36220   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36221   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36222   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36223   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36224   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36225   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36226 
36236 #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36237   __typeof(p1) __p1 = (p1); \
36238   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36239   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36240   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36241   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36242   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36243   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36244   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36245   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36246   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36247   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36248   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36249   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36250   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
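
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vrshlq rounds after shifting and accepts either a per-lane shift
   vector or a single scalar count; negative counts shift right.  Assumes
   the vrshlq user-namespace alias defined earlier in this header:

     int16x8_t v, per_lane;
     v = vrshlq (v, per_lane);   // resolves to __arm_vrshlq_s16
     v = vrshlq (v, -2);         // resolves to __arm_vrshlq_n_s16
*/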
36251 
36252 #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36253   __typeof(p1) __p1 = (p1); \
36254   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36255   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36256   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36257   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36258   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36259   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36260   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36261 
36262 #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36263   __typeof(p1) __p1 = (p1); \
36264   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36265   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36266   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36267   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36268   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36269   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36270   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36271 
36272 #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36273   __typeof(p1) __p1 = (p1); \
36274   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36275   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36276   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36277   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36278   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36279   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36280   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36281   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36282   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36283   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36284   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36285   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36286   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36287 
36288 #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36289   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36290   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36291   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36292   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
36293 
36294 #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36295   __typeof(p1) __p1 = (p1); \
36296   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36297   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36298   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36299   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36300   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36301   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36302   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36303 
36304 #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36305   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36306   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36307   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36308   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36309   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36310   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36311   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36312 
36313 #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36314   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36315   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36316   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36317   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36318   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36319   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36320   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36321 
36322 #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36323   __typeof(p1) __p1 = (p1); \
36324   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36325   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36326   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36327   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36328   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36329   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36330   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36331   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36332   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36333   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36334   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36335   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36336   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
36337 
36338 #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36339   __typeof(p1) __p1 = (p1); \
36340   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36341   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36342   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36343   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36344   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36345   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36346   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
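
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vqrdmulhq is the saturating rounding doubling multiply returning
   the high half, i.e. a Q7/Q15/Q31 fixed-point multiply.  Assumes the
   vqrdmulhq alias defined earlier in this header:

     int16x8_t a, b;
     int16x8_t prod = vqrdmulhq (a, b);        // __arm_vqrdmulhq_s16
     int16x8_t half = vqrdmulhq (a, 0x4000);   // multiply by 0.5 in Q15
*/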
36347 
36348 #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36349   __typeof(p1) __p1 = (p1); \
36350   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36351   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36352   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36353 
36354 #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36355   __typeof(p1) __p1 = (p1); \
36356   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36357   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36358   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36359 
36360 #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36361   __typeof(p1) __p1 = (p1); \
36362   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36363   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36364   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36365   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36366   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36367 
36368 #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36369   __typeof(p1) __p1 = (p1); \
36370   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36371   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36372   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36373   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36374   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
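
/* Illustrative usage (editorial sketch, not part of the original header):
   vqmovnbq/vqmovntq saturate and narrow a wide vector into the even
   (bottom) or odd (top) lanes of the destination, so a pair of wide
   vectors can be packed into one narrow vector:

     int16x8_t lo, hi;
     int8x16_t packed = vdupq_n_s8 (0);
     packed = vqmovnbq (packed, lo);   // even lanes <- saturate(lo)
     packed = vqmovntq (packed, hi);   // odd lanes  <- saturate(hi)
*/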
36375 
36376 #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36377   __typeof(p1) __p1 = (p1); \
36378   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36379   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36380   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36381   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36382   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
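
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vqdmulltq is a widening operation: the top (odd-numbered) lanes of
   the 16-bit inputs produce saturated, doubled 32-bit products.  Assumes
   the vqdmulltq alias defined earlier in this header:

     int16x8_t a, b;
     int32x4_t wide  = vqdmulltq (a, b);   // __arm_vqdmulltq_s16
     int32x4_t wide2 = vqdmulltq (a, 3);   // __arm_vqdmulltq_n_s16
*/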
36383 
36384 #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36385   __typeof(p1) __p1 = (p1); \
36386   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36387   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36388   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36389 
36390 #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36391   __typeof(p1) __p1 = (p1); \
36392   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36393   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36394   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36395   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36396   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36397 
36398 #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36399   __typeof(p1) __p1 = (p1); \
36400   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36401   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36402   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36403   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36404   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36405   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36406   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36407 
36408 #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36409   __typeof(p1) __p1 = (p1); \
36410   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36411   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36412   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36413   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36414   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36415   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36416   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36417   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36418   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36419   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36420   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36421   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36422   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36423 
36424 #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36425   __typeof(p1) __p1 = (p1); \
36426   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36427   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36428   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
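
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vmulltq_poly performs a carry-less (polynomial) multiply of the
   top (odd-numbered) 8-bit lanes, producing 16-bit results.  Assumes the
   vmulltq_poly alias defined earlier in this header:

     uint8x16_t a, b;
     uint16x8_t r = vmulltq_poly (a, b);   // __arm_vmulltq_poly_p8
*/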
36429 
36430 #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36431   __typeof(p1) __p1 = (p1); \
36432   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36433   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36434   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
36435 
36436 #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36437   __typeof(p1) __p1 = (p1); \
36438   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36439   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36440   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36441   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36442   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36443   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36444   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36445 
36446 #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36447   __typeof(p1) __p1 = (p1); \
36448   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36449   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36450   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36451   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36452   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36453   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36454   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36455   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36456   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36457   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36458   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36459   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36460   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36461 
36462 #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36463   __typeof(p1) __p1 = (p1); \
36464   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36465   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36466   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36467   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36468 
36469 #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36470   __typeof(p1) __p1 = (p1); \
36471   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36472   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36473   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36474   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36475 
36476 #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36477   __typeof(p1) __p1 = (p1); \
36478   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36479   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36480   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36481   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36482   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36483   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36484   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36485   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36486   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36487   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36488   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36489   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36490   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36491 
36492 #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36493   __typeof(p1) __p1 = (p1); \
36494   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36495   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36496   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36497   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36498   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36499   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36500   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36501 
36502 #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36503   __typeof(p1) __p1 = (p1); \
36504   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36505   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36506   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36507   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36508 
36509 #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36510   __typeof(p1) __p1 = (p1); \
36511   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36512   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36513   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36514   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36515   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36516   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36517   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36518 
36519 #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36520   __typeof(p1) __p1 = (p1); \
36521   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36522   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36523   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36524   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
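
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vmaxaq keeps, per lane, the larger of an unsigned accumulator and
   the absolute value of a signed input.  Assumes the vmaxaq alias defined
   earlier in this header:

     uint16x8_t acc = vdupq_n_u16 (0);
     int16x8_t  s;
     acc = vmaxaq (acc, s);   // acc[i] = max (acc[i], |s[i]|)
*/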
36525 
36526 #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36527   __typeof(p1) __p1 = (p1); \
36528   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36529   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36530   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36531   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36532   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36533 
36534 #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36535   __typeof(p1) __p1 = (p1); \
36536   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36537   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36538   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36539   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36540   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36541 
36542 #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36543   __typeof(p1) __p1 = (p1); \
36544   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36545   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36546   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36547   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36548   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36549   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36550   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36551 
36552 #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36553   __typeof(p1) __p1 = (p1); \
36554   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36555   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36556   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36557   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36558   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36559   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36560   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36561 
36562 #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36563   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36564   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36565   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36566   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36567   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36568 
36569 #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36570   __typeof(p1) __p1 = (p1); \
36571   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36572   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36573   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36574   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36575   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
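
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vqrshrnbq right-shifts a wide vector with rounding, saturates, and
   writes the narrowed result into the even (bottom) lanes of the first
   operand.  Assumes the vqrshrnbq alias defined earlier in this header:

     int16x8_t wide;
     int8x16_t narrow = vdupq_n_s8 (0);
     narrow = vqrshrnbq (narrow, wide, 4);   // __arm_vqrshrnbq_n_s16
*/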
36576 
36577 #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36578   __typeof(p1) __p1 = (p1); \
36579   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36580   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36581   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36582 
36583 #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36584   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36585   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36586   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36587   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36588   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36589   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36590   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
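
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vshlcq shifts each 32-bit element left by an immediate, feeding
   bits from one element into the next, with *carry supplying the incoming
   bits for lane 0 and receiving the bits shifted out of the last lane.
   Assumes the vshlcq alias defined earlier in this header:

     uint32x4_t v;
     uint32_t carry = 0;
     v = vshlcq (v, &carry, 8);   // __arm_vshlcq_u32, carry updated in place
*/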
36591 
36592 #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36593   __typeof(p1) __p1 = (p1); \
36594   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36595   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36596   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36597   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36598 
36599 #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36600   __typeof(p1) __p1 = (p1); \
36601   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36602   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36603   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36604   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36605   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36606   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36607   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36608 
36609 #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36610   __typeof(p1) __p1 = (p1); \
36611   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36612   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36613   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36614   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36615 
36616 #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36617   __typeof(p1) __p1 = (p1); \
36618   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36619   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36620   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36621   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36622 
36623 #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36624   __typeof(p1) __p1 = (p1); \
36625   __typeof(p2) __p2 = (p2); \
36626   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36627   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36628   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36629   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
36630   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
36631   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
36632   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
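
/* Illustrative usage (editorial sketch, not part of the original header):
   __arm_vmlaq multiplies every lane of the second vector by a scalar and
   accumulates into the first.  Assumes the vmlaq alias defined earlier in
   this header:

     int32x4_t acc, x;
     acc = vmlaq (acc, x, 7);   // acc[i] += x[i] * 7  (__arm_vmlaq_n_s32)
*/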
36633 
36634 #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36635   __typeof(p1) __p1 = (p1); \
36636   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36637   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36638   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36639   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36640   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36641   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36642   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36643 
36644 #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36645   __typeof(p1) __p1 = (p1); \
36646   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36647   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36648   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36649   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36650   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36651   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36652   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36653 
36654 #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36655   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36656   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36657   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36658   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36659   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36660   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36661   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36662 
36663 #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36664   __typeof(p1) __p1 = (p1); \
36665   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36666   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
36667   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
36668   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
36669   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
36670   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
36671   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
36672 
36673 #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36674   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36675   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36676   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36677   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36678   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36679   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36680   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36681 
36682 #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36683   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36684   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36685   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36686   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36687   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36688   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36689   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36690 
36691 #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36692   __typeof(p1) __p1 = (p1); \
36693   __typeof(p2) __p2 = (p2); \
36694   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36695   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36696   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36697   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36698 
36699 #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36700   __typeof(p1) __p1 = (p1); \
36701   __typeof(p2) __p2 = (p2); \
36702   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36703   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36704   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36705   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36706 
36707 #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36708   __typeof(p1) __p1 = (p1); \
36709   __typeof(p2) __p2 = (p2); \
36710   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36711   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36712   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36713   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36714 
36715 #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36716   __typeof(p1) __p1 = (p1); \
36717   __typeof(p2) __p2 = (p2); \
36718   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36719   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36720   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36721   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36722 
36723 #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36724   __typeof(p1) __p1 = (p1); \
36725   __typeof(p2) __p2 = (p2); \
36726   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36727   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36728   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36729   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36730 
36731 #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36732   __typeof(p1) __p1 = (p1); \
36733   __typeof(p2) __p2 = (p2); \
36734   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36735   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36736   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36737   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
36738   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
36739   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
36740   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
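/* Illustrative sketch (not from the original header): __arm_vmlasq takes a
   scalar third operand, which __ARM_mve_typeid classifies as
   __ARM_mve_type_int_n, so the element width is taken from the vector
   operands.  With hypothetical operands

     uint16x8_t acc, m;
     uint16_t   s;

   __arm_vmlasq (acc, m, s) resolves to __arm_vmlasq_n_u16 and s is coerced
   to uint16_t.  */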
36741 
36742 #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36743   __typeof(p1) __p1 = (p1); \
36744   __typeof(p2) __p2 = (p2); \
36745   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36746   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36747   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36748   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36749 
36750 #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36751   __typeof(p1) __p1 = (p1); \
36752   __typeof(p2) __p2 = (p2); \
36753   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36754   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36755   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36756   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36757 
36758 #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36759   __typeof(p1) __p1 = (p1); \
36760   __typeof(p2) __p2 = (p2); \
36761   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36762   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36763   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36764   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36765 
36766 #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36767   __typeof(p1) __p1 = (p1); \
36768   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36769   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36770   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36771   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36772 
36773 #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36774   __typeof(p1) __p1 = (p1); \
36775   __typeof(p2) __p2 = (p2); \
36776   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36777   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36778   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36779   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36780 
36781 #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36782   __typeof(p1) __p1 = (p1); \
36783   __typeof(p2) __p2 = (p2); \
36784   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36785   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36786   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36787   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36788 
36789 #define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36790   __typeof(p1) __p1 = (p1); \
36791   __typeof(p2) __p2 = (p2); \
36792   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36793   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36794   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36795   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36796 
36797 #define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36798   __typeof(p1) __p1 = (p1); \
36799   __typeof(p2) __p2 = (p2); \
36800   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36801   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36802   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36803   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36804 
36805 #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36806   __typeof(p1) __p1 = (p1); \
36807   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36808   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36809   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36810   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36811   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
36812 
36813 #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36814   __typeof(p1) __p1 = (p1); \
36815   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36816   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36817   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36818   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36819   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36820 
36821 #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36822   __typeof(p1) __p1 = (p1); \
36823   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36824   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36825   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36826   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36827   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36828 
36829 #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36830   __typeof(p1) __p1 = (p1); \
36831   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36832   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36833   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36834   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36835   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
36836 
36837 #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36838   __typeof(p1) __p1 = (p1); \
36839   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36840   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36841   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36842   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36843   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36844 
36845 #define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36846   __typeof(p1) __p1 = (p1); \
36847   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36848   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36849   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36850   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36851   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36852 
36853 #define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36854   __typeof(p1) __p1 = (p1); \
36855   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36856   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36857   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36858   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36859   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
36860   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36861   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36862   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36863   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
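/* Illustrative note (not part of the original header): for __arm_vcvtq_m the
   conversion direction follows from the argument types: a floating-point
   inactive vector with an integer source selects the int-to-float forms, and
   an integer inactive vector with a floating-point source selects the
   float-to-int forms.  With hypothetical operands

     float32x4_t  inactive;
     int32x4_t    src;
     mve_pred16_t p;

   __arm_vcvtq_m (inactive, src, p) resolves to __arm_vcvtq_m_f32_s32.  */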
36864 
36865 #define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
36866   __typeof(p1) __p1 = (p1); \
36867   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36868   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
36869   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
36870   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
36871   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
36872   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
36873   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
36874   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
36875   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
36876 
36877 #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36878   __typeof(p1) __p1 = (p1); \
36879   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36880   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36881   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36882   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36883   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36884   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36885 
36886 #define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36887   __typeof(p1) __p1 = (p1); \
36888   __typeof(p2) __p2 = (p2); \
36889   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36890   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
36891   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36892 
36893 #define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36894   __typeof(p1) __p1 = (p1); \
36895   __typeof(p2) __p2 = (p2); \
36896   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36897   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
36898   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36899 
36900 #define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36901   __typeof(p1) __p1 = (p1); \
36902   __typeof(p2) __p2 = (p2); \
36903   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36904   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
36905   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36906 
36907 #define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36908   __typeof(p1) __p1 = (p1); \
36909   __typeof(p2) __p2 = (p2); \
36910   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36911   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
36912   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36913 
36914 #define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36915   __typeof(p1) __p1 = (p1); \
36916   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36917   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36918   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36919 
36920 #define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36921   __typeof(p1) __p1 = (p1); \
36922   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36923   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36924   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36925 
36926 #define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36927   __typeof(p1) __p1 = (p1); \
36928   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36929   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36930   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36931 
36932 #define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36933   __typeof(p1) __p1 = (p1); \
36934   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36935   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36936   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36937   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36938   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
36939   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
36940   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
36941   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
36942   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
36943   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36944   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
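/* Illustrative note (not part of the original header): the predicated
   compares accept either a vector or a scalar second operand.  Integer
   scalars dispatch through __ARM_mve_type_int_n and are coerced to the
   matching element type; floating-point scalars dispatch through
   __ARM_mve_type_fp_n and are routed through __ARM_mve_coerce2 as double.
   With hypothetical operands

     float16x8_t  a;
     mve_pred16_t p;

   __arm_vcmpgtq_m (a, 1.0, p) resolves to __arm_vcmpgtq_m_n_f16.  */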
36945 
36946 #define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36947   __typeof(p1) __p1 = (p1); \
36948   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36949   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36950   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36951   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36952   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36953   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36954   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
36955   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
36956   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
36957   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
36958   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36959 
36960 #define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36961   __typeof(p1) __p1 = (p1); \
36962   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36963   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36964   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36965   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36966   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36967   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36968   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
36969   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
36970   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
36971   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
36972   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36973 
36974 #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36975   __typeof(p1) __p1 = (p1); \
36976   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36977   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36978   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36979   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36980   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36981   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36982   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
36983   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36984   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
36985   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
36986   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
36987   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
36988   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
36989   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
36990   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
36991   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
36992   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36993 
36994 #define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36995   __typeof(p1) __p1 = (p1); \
36996   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36997   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
36998   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36999 
37000 #define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37001   __typeof(p1) __p1 = (p1); \
37002   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37003   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37004   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37005 
37006 #define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37007   __typeof(p1) __p1 = (p1); \
37008   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37009   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37010   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
37011   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37012   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37013 
37014 #define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37015   __typeof(p1) __p1 = (p1); \
37016   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37017   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37018   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
37019   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37020   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37021 
37022 #define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37023   __typeof(p1) __p1 = (p1); \
37024   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37025   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37026   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
37027   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37028   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37029 
37030 #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37031   __typeof(p1) __p1 = (p1); \
37032   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37033   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
37034   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
37035   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
37036   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
37037   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
37038   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2), \
37039   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), (float16_t) __p1, p2), \
37040   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), (float32_t) __p1, p2));})
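/* Illustrative note (not part of the original header): __arm_vdupq_m casts
   the scalar directly to the element type of the inactive vector rather than
   going through __ARM_mve_coerce, so any integer or floating-point constant
   is accepted as the second argument.  With hypothetical operands

     int32x4_t    inactive;
     mve_pred16_t p;

   __arm_vdupq_m (inactive, 42, p) resolves to __arm_vdupq_m_n_s32 with the
   scalar converted to int32_t.  */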
37041 
37042 #define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37043   __typeof(p1) __p1 = (p1); \
37044   __typeof(p2) __p2 = (p2); \
37045   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37046   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
37047   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)), \
37048   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
37049   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
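/* Illustrative note (not part of the original header): __arm_vfmaq has both
   a vector-addend form and a scalar (_n) form; the scalar path is selected
   when the third argument classifies as __ARM_mve_type_fp_n.  With
   hypothetical operands

     float32x4_t acc, a, b;

   __arm_vfmaq (acc, a, b) resolves to __arm_vfmaq_f32, while
   __arm_vfmaq (acc, a, 0.5f) resolves to __arm_vfmaq_n_f32.  */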
37050 
37051 #define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37052   __typeof(p1) __p1 = (p1); \
37053   __typeof(p2) __p2 = (p2); \
37054   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37055   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
37056   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
37057 
37058 #define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37059   __typeof(p1) __p1 = (p1); \
37060   __typeof(p2) __p2 = (p2); \
37061   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37062   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
37063   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)));})
37064 
37065 #define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37066   __typeof(p1) __p1 = (p1); \
37067   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37068   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37069   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37070 
37071 #define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37072   __typeof(p1) __p1 = (p1); \
37073   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37074   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37075   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37076 
37077 #define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37078   __typeof(p1) __p1 = (p1); \
37079   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37080   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37081   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37082 
37083 #define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37084   __typeof(p1) __p1 = (p1); \
37085   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37086   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37087   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37088 
37089 #define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37090   __typeof(p1) __p1 = (p1); \
37091   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37092   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37093   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
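/* Illustrative note (not part of the original header): the predicated
   across-vector maximum reductions (vmaxnmavq_p, vmaxnmvq_p, and the
   corresponding minimum forms below) take the running scalar as their first
   argument; it classifies as __ARM_mve_type_fp_n and is routed through
   __ARM_mve_coerce2 as double.  With hypothetical operands

     float16_t    best;
     float16x8_t  v;
     mve_pred16_t p;

   __arm_vmaxnmvq_p (best, v, p) resolves to __arm_vmaxnmvq_p_f16.  */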
37094 
37095 #define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37096   __typeof(p1) __p1 = (p1); \
37097   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37098   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37099   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37100 
37101 #define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37102   __typeof(p1) __p1 = (p1); \
37103   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37104   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37105   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37106 
37107 #define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37108   __typeof(p1) __p1 = (p1); \
37109   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37110   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37111   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37112 
37113 #define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37114   __typeof(p1) __p1 = (p1); \
37115   __typeof(p2) __p2 = (p2); \
37116   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37117   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __p2), \
37118   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __p2));})
37119 
37120 #define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37121   __typeof(p1) __p1 = (p1); \
37122   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37123   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37124   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37125 
37126 #define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37127   __typeof(p1) __p1 = (p1); \
37128   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37129   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37130   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37131 
37132 #define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37133   __typeof(p1) __p1 = (p1); \
37134   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37135   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37136   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37137   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37138   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
37139   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37140   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37141   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37142   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37143 
37144 #define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37145   __typeof(p1) __p1 = (p1); \
37146   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37147   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37148   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37149   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
37150   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37151   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
37152 
37153 #define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37154   __typeof(p1) __p1 = (p1); \
37155   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37156   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37157   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37158   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37159   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
37160   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
37161   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37162   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37163   int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
37164   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37165   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
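/* Illustrative note (not part of the original header): __arm_vpselq is the
   predicated lane select; both inputs must have the same vector type, which
   also fixes the result type.  With hypothetical operands

     uint32x4_t   a, b;
     mve_pred16_t p;

   __arm_vpselq (a, b, p) resolves to __arm_vpselq_u32, taking lanes from a
   where the predicate is set and from b elsewhere.  */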
37166 
37167 #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
37168   __typeof(p1) __p1 = (p1); \
37169   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37170   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
37171   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
37172   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
37173   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
37174   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
37175   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
37176   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
37177   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
37178   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
37179   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
37180 
37181 #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37182   __typeof(p1) __p1 = (p1); \
37183   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37184   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37185   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37186   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37187   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37188 
37189 #define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37190   __typeof(p1) __p1 = (p1); \
37191   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37192   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37193   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
37194 
37195 #define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37196   __typeof(p1) __p1 = (p1); \
37197   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37198   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37199   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37200 
37201 #define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37202   __typeof(p1) __p1 = (p1); \
37203   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37204   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37205   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37206   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37207   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37208 
37209 #define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37210   __typeof(p1) __p1 = (p1); \
37211   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37212   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37213   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37214   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37215   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37216 
37217 #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37218   __typeof(p1) __p1 = (p1); \
37219   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37220   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37221   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37222 
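/* The *_m ("merging") narrowing moves below take an existing narrow vector,
   a wide vector and an mve_pred16_t; lanes whose predicate bit is clear keep
   the value already held in the first argument.  Illustrative sketch only,
   names are not part of the API:

     int16x8_t acc = vdupq_n_s16 (0);
     int32x4_t wide = vdupq_n_s32 (70000);
     mve_pred16_t p = vctp32q (2);
     acc = vqmovnbq_m (acc, wide, p);   -> __arm_vqmovnbq_m_s32  */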
37223 #define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37224   __typeof(p1) __p1 = (p1); \
37225   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37226   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37227   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37228   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37229   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37230 
37231 #define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37232   __typeof(p1) __p1 = (p1); \
37233   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37234   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37235   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37236   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37237   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37238 
37239 #define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37240   __typeof(p1) __p1 = (p1); \
37241   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37242   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37243   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37244 
37245 #define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37246   __typeof(p1) __p1 = (p1); \
37247   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37248   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37249   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37250 
37251 #define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37252   __typeof(p1) __p1 = (p1); \
37253   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37254   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37255   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37256   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37257   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37258 
37265 #define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37266   __typeof(p1) __p1 = (p1); \
37267   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37268   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37269   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37270   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37271   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37272   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37273 
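/* Predicated compares return an mve_pred16_t and accept either a vector or a
   scalar second operand (the __ARM_mve_type_int_n / _fp_n associations).
   Hypothetical sketch, not taken from this header:

     int32x4_t v = vdupq_n_s32 (5);
     mve_pred16_t p = vctp32q (3);
     mve_pred16_t ge = vcmpgeq_m (v, 4, p);   -> __arm_vcmpgeq_m_n_s32  */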
37274 #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37275   __typeof(p1) __p1 = (p1); \
37276   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37277   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37278   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37279   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37280   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
37281   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
37282   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
37283   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
37284   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
37285   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37286   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37287 
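/* The four-argument *_m forms that follow take an "inactive" vector first:
   result lanes whose predicate bit is clear are copied from it instead of
   being computed.  Sketch with invented names:

     uint8x16_t inactive = vdupq_n_u8 (0);
     uint8x16_t a = vdupq_n_u8 (10), b = vdupq_n_u8 (3);
     uint8x16_t d = vabdq_m (inactive, a, b, vctp8q (12));   -> __arm_vabdq_m_u8  */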
37288 #define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37289   __typeof(p1) __p1 = (p1); \
37290   __typeof(p2) __p2 = (p2); \
37291   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37292   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37293   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37294   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37295   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37296   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37297   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37298   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37299   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37300 
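/* vaddq_m (like vmulq_m and vsubq_m further down) also accepts a scalar as
   the last data operand; the *_int_n / *_fp_n associations route such calls
   to the *_n_* intrinsics.  Illustrative sketch only:

     float16x8_t acc = vdupq_n_f16 (1.0f);
     acc = vaddq_m (acc, acc, 2.5, vctp16q (4));   -> __arm_vaddq_m_n_f16  */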
37301 #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37302   __typeof(p1) __p1 = (p1); \
37303   __typeof(p2) __p2 = (p2); \
37304   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37305   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37306   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37307   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37308   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37309   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37310   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37311   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37312   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
37313   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int), p3), \
37314   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int), p3), \
37315   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int), p3), \
37316   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int), p3), \
37317   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int), p3), \
37318   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int), p3), \
37319   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37320   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37321 
37322 #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37323   __typeof(p1) __p1 = (p1); \
37324   __typeof(p2) __p2 = (p2); \
37325   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37326   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37327   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37328   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37329   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37330   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37331   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37332   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37333   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37334 
37335 #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37336   __typeof(p1) __p1 = (p1); \
37337   __typeof(p2) __p2 = (p2); \
37338   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37339   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37340   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37341   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37342   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37343   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37344   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37345   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37346   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37347 
37348 #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37349   __typeof(p1) __p1 = (p1); \
37350   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37351   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
37352   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
37353   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
37354   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
37355   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
37356   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
37357   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
37358   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
37359 
37360 #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37361   __typeof(p1) __p1 = (p1); \
37362   __typeof(p2) __p2 = (p2); \
37363   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37364   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37365   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37366   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37367   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37368   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37369   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37370   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37371   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37372 
37373 #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37374   __typeof(p1) __p1 = (p1); \
37375   __typeof(p2) __p2 = (p2); \
37376   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37377   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37378   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37379   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37380   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37381   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37382   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37383   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37384   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37385 
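/* vcmlaq/vcmulq and their _rot90/_rot180/_rot270 variants are float-only;
   adjacent lane pairs are treated as complex numbers (even lane real, odd
   lane imaginary).  Sketch, names invented:

     float32x4_t acc = vdupq_n_f32 (0.0f);
     float32x4_t x = vdupq_n_f32 (1.0f), y = vdupq_n_f32 (2.0f);
     acc = vcmlaq_m (acc, x, y, vctp32q (4));   -> __arm_vcmlaq_m_f32  */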
37386 #define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37387   __typeof(p1) __p1 = (p1); \
37388   __typeof(p2) __p2 = (p2); \
37389   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37390   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37391   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37392 
37393 #define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37394   __typeof(p1) __p1 = (p1); \
37395   __typeof(p2) __p2 = (p2); \
37396   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37397   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37398   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37399 
37400 #define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37401   __typeof(p1) __p1 = (p1); \
37402   __typeof(p2) __p2 = (p2); \
37403   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37404   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37405   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37406 
37407 #define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37408   __typeof(p1) __p1 = (p1); \
37409   __typeof(p2) __p2 = (p2); \
37410   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37411   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37412   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37413 
37414 #define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37415   __typeof(p1) __p1 = (p1); \
37416   __typeof(p2) __p2 = (p2); \
37417   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37418   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37419   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37420 
37421 #define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37422   __typeof(p1) __p1 = (p1); \
37423   __typeof(p2) __p2 = (p2); \
37424   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37425   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37426   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37427 
37428 #define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37429   __typeof(p1) __p1 = (p1); \
37430   __typeof(p2) __p2 = (p2); \
37431   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37432   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37433   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37434 
37435 #define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37436   __typeof(p1) __p1 = (p1); \
37437   __typeof(p2) __p2 = (p2); \
37438   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37439   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37440   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37441 
37442 #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37443   __typeof(p1) __p1 = (p1); \
37444   __typeof(p2) __p2 = (p2); \
37445   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37446   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37447   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37448   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37449   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37450   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37451   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37452   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37453   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37454 
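/* Predicated fused multiply operations: vfmaq_m accumulates a product (the
   multiplier may be a scalar), vfmasq_m adds a scalar to a product, and
   vfmsq_m is the fused multiply-subtract counterpart.  Hypothetical sketch:

     float32x4_t a = vdupq_n_f32 (1.0f), b = vdupq_n_f32 (2.0f);
     a = vfmaq_m (a, b, 0.5f, vctp32q (4));   -> __arm_vfmaq_m_n_f32  */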
37455 #define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37456   __typeof(p1) __p1 = (p1); \
37457   __typeof(p2) __p2 = (p2); \
37458   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37459   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37460   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
37461   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37462   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37463 
37464 #define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37465   __typeof(p1) __p1 = (p1); \
37466   __typeof(p2) __p2 = (p2); \
37467   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37468   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37469   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37470 
37471 #define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37472   __typeof(p1) __p1 = (p1); \
37473   __typeof(p2) __p2 = (p2); \
37474   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37475   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37476   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37477 
37478 #define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37479   __typeof(p1) __p1 = (p1); \
37480   __typeof(p2) __p2 = (p2); \
37481   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37482   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37483   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37484 
37485 #define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37486   __typeof(p1) __p1 = (p1); \
37487   __typeof(p2) __p2 = (p2); \
37488   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37489   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37490   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37491 
37492 #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37493   __typeof(p1) __p1 = (p1); \
37494   __typeof(p2) __p2 = (p2); \
37495   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37496   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37497   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37498   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37499   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37500   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37501   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37502   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37503   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
37504   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
37505   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
37506   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
37507   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
37508   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
37509   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
37510   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37511   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37512 
37513 #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37514   __typeof(p1) __p1 = (p1); \
37515   __typeof(p2) __p2 = (p2); \
37516   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37517   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37518   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37519   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37520   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37521   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37522   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37523   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37524   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37525 
37526 #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37527   __typeof(p1) __p1 = (p1); \
37528   __typeof(p2) __p2 = (p2); \
37529   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37530   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37531   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37532   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37533   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37534   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37535   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37536   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37537   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
37538   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
37539   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
37540   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
37541   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
37542   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
37543   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
37544   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37545   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37546 
37547 #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37548   __typeof(p1) __p1 = (p1); \
37549   __typeof(p2) __p2 = (p2); \
37550   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37551   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37552   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37553   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37554   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37555   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37556   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37557   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37558   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37559 
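/* The load macros dispatch on the pointer's element type, so the vector type
   is inferred from the pointed-to type; the _z forms zero the lanes whose
   predicate bit is clear.  Sketch with invented names:

     int16_t buf[8] = { 0 };
     int16x8_t v = vld1q (buf);              -> __arm_vld1q_s16
     v = vld1q_z (buf, vctp16q (5));         -> __arm_vld1q_z_s16  */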
37560 #define __arm_vld1q(p0) ( \
37561   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37562   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
37563   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
37564   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
37565   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
37566   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
37567   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
37568   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
37569   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
37570 
37571 #define __arm_vld1q_z(p0,p1) ( \
37572   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37573   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \
37574   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \
37575   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
37576   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \
37577   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \
37578   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
37579   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce1(p0, float16_t *), p1), \
37580   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
37581 
37582 #define __arm_vld2q(p0) ( \
37583   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37584   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
37585   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
37586   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
37587   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
37588   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
37589   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
37590   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
37591   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
37592 
37593 #define __arm_vld4q(p0) ( \
37594   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37595   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
37596   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
37597   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
37598   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
37599   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
37600   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
37601   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
37602   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
37603 
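/* Gather loads take a base pointer plus a vector of unsigned offsets: byte
   offsets for *_gather_offset, element indices (scaled by the element size)
   for *_gather_shifted_offset.  Illustrative sketch; vidupq_n_u16 is assumed
   to be the increment-and-duplicate intrinsic defined elsewhere in this
   header:

     uint16_t table[32] = { 0 };
     uint16x8_t idx = vidupq_n_u16 (0, 1);    -- 0, 1, ..., 7
     uint16x8_t g = vldrhq_gather_shifted_offset (table, idx);  */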
37604 #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37605   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37606   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37607   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37608   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37609   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37610   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
37611 
37612 #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37613   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37614   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37615   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37616   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37617   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37618   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
37619 
37620 #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37621   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37622   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37623   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37624   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37625   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37626   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
37627 
37628 #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37629   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37630   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37631   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37632   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37633   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37634   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
37635 
37636 #define __arm_vldrwq_gather_offset(p0,p1) ( \
37637   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37638   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
37639   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
37640   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
37641 
37642 #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \
37643   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37644   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
37645   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
37646   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
37647 
37648 #define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
37649   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37650   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
37651   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
37652   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
37653 
37654 #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
37655   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37656   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
37657   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
37658   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
37659 
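/* The contiguous store overloads below dispatch on both the pointer type and
   the vector type of the value being stored; the _p forms take an
   mve_pred16_t predicate and store only the active lanes.  Illustrative use
   (hypothetical values, not part of this header):

       int16_t buf[8];
       int16x8_t v = __arm_vdupq_n_s16 (1);
       __arm_vst1q_p (buf, v, 0xffff);    selects __arm_vst1q_p_s16

   with 0xffff meaning "all lanes active".  */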
37660 #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37661   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37662   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37663   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37664   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37665   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
37666   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37667   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37668   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37669   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37670 
37671 #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37672   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37673   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
37674   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
37675   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
37676   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
37677   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
37678   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
37679   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
37680   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
37681 
37682 #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37683   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37684   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
37685   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
37686   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37687   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
37688   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37689   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37690   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
37691   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
37692 
37693 #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37694   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37695   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
37696   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37697   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37698   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37699   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
37700 
37701 #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37702   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37703   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37704   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37705   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37706   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37707   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
37708 
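/* The scatter-store overloads below mirror the gather loads: the _offset
   forms use unscaled (byte) offsets, the _shifted_offset forms scale each
   offset by the element size, and the _p forms store only the lanes enabled
   by the mve_pred16_t predicate.  */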
37709 #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37710   __typeof(p2) __p2 = (p2); \
37711   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37712   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37713   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37714   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37715   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37716   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37717 
37718 #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37719   __typeof(p2) __p2 = (p2); \
37720   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37721   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37722   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37723   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37724   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37725   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37726 
37727 #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37728   __typeof(p2) __p2 = (p2); \
37729   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37730   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37731   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37732   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37733   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37734   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37735 
37736 #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37737   __typeof(p2) __p2 = (p2); \
37738   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37739   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37740   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37741   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37742   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37743   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37744 
37745 #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37746   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37747   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37748   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37749   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37750 
37751 #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37752   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37753   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37754   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37755   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
37756 
37793 #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
37794   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
37795   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
37796   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
37797   int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37798 
37799 #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
37800   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
37801   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
37802   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37803   int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37804 
37805 #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37806   __typeof(p2) __p2 = (p2); \
37807   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
37808   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
37809   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
37810   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37811 
37812 #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37813   __typeof(p2) __p2 = (p2); \
37814   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
37815   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
37816   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37817   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37818 
37819 #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37820   __typeof(p2) __p2 = (p2); \
37821   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
37822   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
37823   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
37824   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37825 
37826 #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37827   __typeof(p2) __p2 = (p2); \
37828   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
37829   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
37830   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37831   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37846 
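/* __arm_vuninitializedq only inspects the type of its argument; it returns a
   vector of that type whose contents are unspecified, typically used where
   the initial contents of a vector do not matter.  */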
37847 #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
37848   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37849   int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
37850   int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
37851   int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
37852   int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
37853   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
37854   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
37855   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
37856   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
37857   int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
37858   int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
37859 
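/* The vreinterpretq_* overloads below are pure bit-pattern reinterpretations:
   they change the element type of a 128-bit vector without modifying or
   converting any of its bits.  */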
37860 #define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
37861   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37862   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37863   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37864   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37865   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37866   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37867   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37868   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37869   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37870   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37871 
37872 #define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
37873   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37874   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37875   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37876   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37877   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37878   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37879   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37880   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37881   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37882   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
37883 
37884 #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
37885   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37886   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37887   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37888   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37889   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37890   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37891   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37892   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37893   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37894   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37895 
37896 #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
37897   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37898   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37899   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37900   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37901   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37902   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37903   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37904   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37905   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37906   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37907 
37908 #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
37909   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37910   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37911   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37912   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37913   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37914   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37915   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37916   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37917   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37918   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37919 
37920 #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
37921   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37922   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37923   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37924   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37925   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37926   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37927   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37928   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37929   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37930   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37931 
37932 #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
37933   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37934   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37935   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37936   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37937   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37938   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37939   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37940   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37941   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37942   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37943 
37944 #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
37945   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37946   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37947   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37948   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37949   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37950   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37951   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37952   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37953   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37954   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37955 
37956 #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
37957   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37958   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37959   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37960   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37961   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37962   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
37963   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37964   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37965   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37966   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37967 
37968 #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
37969   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
37970   int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
37971   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
37972   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
37973   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
37974   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
37975   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
37976   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
37977   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
37978   int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37979 
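/* The _wb (write-back) scatter-base forms take a pointer to the uint32x4_t
   vector of base addresses and, after performing the store, advance that
   vector by the constant offset, so successive calls walk through memory.  */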
37980 #define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
37981   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
37982   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
37983   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
37984   int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37985 
37986 #define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
37987   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
37988   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
37989   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37990   int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37991 
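/* The _x ("don't care") variants below take an mve_pred16_t predicate as
   their last argument; lanes whose predicate bits are clear have unspecified
   values in the result.  Illustrative use (hypothetical values):

       int32x4_t a = __arm_vdupq_n_s32 (3);
       int32x4_t b = __arm_vdupq_n_s32 (4);
       int32x4_t r = __arm_vaddq_x (a, b, 0x00ff);    selects __arm_vaddq_x_s32;
                                                      lanes 2 and 3 of r are undefined
*/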
37992 #define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37993   __typeof(p2) __p2 = (p2); \
37994   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37995   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37996   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37997   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37998   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37999   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38000   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38001   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38002   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38003 
38004 #define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38005   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38006   int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
38007   int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
38008   int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
38009   int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38010   int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38011 
38012 #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38013   __typeof(p2) __p2 = (p2); \
38014   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38015   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38016   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38017   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38018   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
38019   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
38020   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
38021   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38022   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38023   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38024   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
38025   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
38026   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
38027   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38028   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
38029   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
38030   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
38031 
38032 #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38033   __typeof(p2) __p2 = (p2); \
38034   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38035   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38036   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38037   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38038   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38039   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38040   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38041   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38042   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38043 
38044 #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38045   __typeof(p2) __p2 = (p2); \
38046   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38047   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8   (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38048   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16  (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38049   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32  (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38050   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38051   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38052   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38053   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38054   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38055 
38056 #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38057   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38058   int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
38059   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
38060   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
38061   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
38062   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
38063   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
38064   int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
38065   int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
38066 
38067 #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38068   __typeof(p2) __p2 = (p2); \
38069   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38070   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38071   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38072   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38073   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38074   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38075   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38076   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38077   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38078 
38079 #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38080   __typeof(p2) __p2 = (p2); \
38081   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38082   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38083   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38084   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38085   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38086   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38087   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38088   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38089   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38090 
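/* The vcmulq*_x forms are the predicated ("don't care") complex multiplies,
   available for float16x8_t and float32x4_t only; the rot90/rot180/rot270
   suffixes select the rotation used by the underlying VCMUL instruction.  */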
38091 #define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38092   __typeof(p2) __p2 = (p2); \
38093   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38094   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38095   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38096 
38097 #define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38098   __typeof(p2) __p2 = (p2); \
38099   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38100   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38101   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38102 
38103 #define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38104   __typeof(p2) __p2 = (p2); \
38105   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38106   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38107   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38108 
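/* __arm_vcvtq_x converts integer vectors to the floating-point vectors of the
   same element width (s16/u16 to f16, s32/u32 to f32); the _n forms
   additionally take the number of fraction bits for a fixed-point
   conversion.  */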
38109 #define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38110   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38111   int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
38112   int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
38113   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
38114   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
38115 
38116 #define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38117   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38118   int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
38119   int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
38120   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
38121   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
38122 
38123 #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38124   __typeof(p2) __p2 = (p2); \
38125   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38126   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38127   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38128   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38129   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38130   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38131   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38132   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38133   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38134 
38135 #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38136   __typeof(p2) __p2 = (p2); \
38137   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38138   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38139   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38140 
38141 #define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38142   __typeof(p2) __p2 = (p2); \
38143   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38144   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38145   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38146 
38147 #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38148   __typeof(p2) __p2 = (p2); \
38149   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38150   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38151   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38152   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38153   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
38154   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
38155   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
38156   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38157   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38158   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38159   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
38160   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
38161   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
38162   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38163   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
38164   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
38165   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
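
/* In the table above, a scalar second operand is matched through
   __ARM_mve_type_int_n (integer scalars) or __ARM_mve_type_fp_n
   (floating-point scalars, routed through __ARM_mve_coerce2 as double),
   so the same name covers vector-by-vector and vector-by-scalar forms.
   A sketch, assuming a predicate "p":

     float16x8_t v = vdupq_n_f16 (1.5);
     float16x8_t w = vmulq_x (v, 2.0f, p);   selects __arm_vmulq_x_n_f16  */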
38166 
38167 #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38168   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38169   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
38170   int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
38171   int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
38172   int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38173   int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38174 
38175 #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38176   __typeof(p2) __p2 = (p2); \
38177   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38178   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38179   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38180   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38181   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38182   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38183   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38184   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38185   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38186 
38187 #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38188   __typeof(p2) __p2 = (p2); \
38189   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38190   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
38191   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
38192   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
38193   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
38194   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
38195   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
38196   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38197   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38198 
38199 #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38200   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38201   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
38202   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
38203   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
38204   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
38205   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));})
38206 
38207 #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38208   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38209   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
38210   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
38211   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
38212   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
38213   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
38214   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \
38215   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38216   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38217 
38218 #define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38219   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38220   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38221   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38222 
38223 #define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38224   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38225   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38226   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38227 
38228 #define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38229   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38230   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38231   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38232 
38233 #define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38234   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38235   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38236   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38237 
38238 #define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38239   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38240   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38241   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38242 
38243 #define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
38244   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
38245   int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
38246   int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38247 
38248 #define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
38249   __typeof(p2) __p2 = (p2); \
38250   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38251   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38252   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
38253   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
38254   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
38255 
38256 #define __arm_vcmulq_rot90_x(p1,p2,p3)  ({ __typeof(p1) __p1 = (p1); \
38257   __typeof(p2) __p2 = (p2); \
38258   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
38259   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
38260   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38261 
38262 #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38263   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38264   int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38265   int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38266   int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38267   int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
38268   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38269   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38270   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
38271   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1), \
38272   int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
38273   int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
38274 
38275 #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
38276   __typeof(p1) __p1 = (p1); \
38277   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38278   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
38279   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
38280   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
38281   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
38282   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
38283   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
38284   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
38285   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
38286   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
38287   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
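
/* Lane accessors: the element type of the result (vgetq_lane) or of the
   scalar argument (vsetq_lane) follows the vector operand.  A sketch:

     uint32x4_t v = vdupq_n_u32 (0);
     v = vsetq_lane (42u, v, 2);             selects __arm_vsetq_lane_u32
     uint32_t x = vgetq_lane (v, 2);         x == 42  */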
38288 
38289 #else /* MVE Integer.  */
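
/* Without the MVE floating-point extension only the integer overloads below
   are provided; the selection mechanism is the same _Generic dispatch on
   __ARM_mve_typeid as in the floating-point section above.  */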
38290 
38291 #define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
38292   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
38293   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
38294   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
38295 
38296 #define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
38297   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
38298   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
38299   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
38300 
38301 #define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
38302   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
38303   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
38304   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
38305   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
38306   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
38307   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
38308   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
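
/* vst4q stores four registers with 4-way interleaving.  A sketch, where
   "src" and "dst" are hypothetical int8_t pointers to suitably sized
   buffers:

     int8x16x4_t t = vld4q_s8 (src);
     vst4q (dst, t);                         selects __arm_vst4q_s8  */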
38309 
38310 #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
38311   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38312   int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38313   int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38314   int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38315 
38316 #define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
38317   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38318   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38319   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38320   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38321 
38322 #define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
38323   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38324   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38325   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38326   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
38327   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38328   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
38329   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38330 
38331 #define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
38332   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38333   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38334   int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38335   int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38336 
38337 #define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
38338   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38339   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38340   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38341   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38342   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38343 
38344 #define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
38345   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38346   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38347   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38348   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38349   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38350 
38351 #define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
38352   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38353   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38354   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38355   int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
38356   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38357   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
38358   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38359 
38360 #define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
38361   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38362   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38363   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
38364 
38365 #define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
38366   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38367   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38368   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38369   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38370   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38371 
38372 #define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
38373   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38374   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38375   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38376   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
38377   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
38378   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
38379   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38380 
38381 #define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
38382   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38383   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38384   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38385   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38386 
38387 #define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
38388   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38389   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
38390   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
38391   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38392 
38393 #define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38394   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38395   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38396   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38397   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38398   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38399   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38400   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38401 
38402 #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38403   __typeof(p1) __p1 = (p1); \
38404   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38405   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38406   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38407   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38408   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38409   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38410   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38411   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38412   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38413   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38414   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38415   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38416   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
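
/* The compare wrappers accept either two vectors or a vector and a scalar
   and return an mve_pred16_t predicate.  A sketch:

     int16x8_t a = vdupq_n_s16 (1);
     mve_pred16_t p = vcmpneq (a, 0);        selects __arm_vcmpneq_n_s16  */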
38417 
38418 #define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38419   __typeof(p1) __p1 = (p1); \
38420   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38421   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38422   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38423   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38424   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38425   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38426   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
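
/* Note that the shift-amount vector is always signed and of the same element
   width as the value being shifted, even for the unsigned variants (for
   example uint16x8_t shifted by int16x8_t maps to __arm_vshlq_u16).  */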
38427 
38428 #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38429   __typeof(p1) __p1 = (p1); \
38430   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38431   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38432   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38433   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38434   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38435   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38436   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38437   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38438   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38439   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38440   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38441   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38442   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
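
/* A sketch of the vector/vector versus vector/scalar dispatch above:

     int32x4_t a = vdupq_n_s32 (10);
     int32x4_t b = vsubq (a, a);             selects __arm_vsubq_s32
     int32x4_t c = vsubq (a, 3);             selects __arm_vsubq_n_s32  */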
38443 
38444 #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38445   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38446   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38447   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38448   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38449   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38450   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38451   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38452 
38453 #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38454   __typeof(p1) __p1 = (p1); \
38455   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38456   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38457   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38458   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38459   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38460   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38461   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38462   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38463   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38464   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38465   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38466   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38467   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38468 
38469 #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38470   __typeof(p1) __p1 = (p1); \
38471   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38472   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38473   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38474   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38475   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38476   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38477   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38478 
38479 #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38480   __typeof(p1) __p1 = (p1); \
38481   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38482   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38483   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38484   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38485   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38486   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38487   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38488 
38489 #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38490   __typeof(p1) __p1 = (p1); \
38491   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38492   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38493   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38494   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38495   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38496   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38497   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38498   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38499   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38500   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38501   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38502   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38503   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38504 
38505 #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38506   __typeof(p1) __p1 = (p1); \
38507   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38508   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38509   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38510   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38511   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38512   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38513   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38514 
38515 #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38516   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38517   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38518   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38519   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38520   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38521   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38522   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38523 
38524 #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38525   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38526   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38527   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38528   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
38529 
38530 #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38531   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38532   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38533   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38534   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38535   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38536   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38537   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38538 
38539 #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38540   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38541   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38542   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38543   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38544   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38545   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38546   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38547 
38548 #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38549   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38550   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38551   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38552   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38553   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38554   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38555   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38556 
38557 #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38558   __typeof(p1) __p1 = (p1); \
38559   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38560   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38561   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38562   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38563   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38564   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38565   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38566   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38567   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38568   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38569   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38570   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38571   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38572 
38573 #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38574   __typeof(p1) __p1 = (p1); \
38575   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38576   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38577   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38578   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38579   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38580   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38581   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38582 
38583 #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38584   __typeof(p1) __p1 = (p1); \
38585   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38586   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38587   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38588   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38589   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38590   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38591   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38592 
38593 #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38594   __typeof(p1) __p1 = (p1); \
38595   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38596   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38597   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38598   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38599   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38600   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38601   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38602   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38603   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38604   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38605   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38606   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38607   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
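
/* vqaddq/vqsubq saturate instead of wrapping.  A sketch:

     uint8x16_t m = vdupq_n_u8 (250);
     uint8x16_t r = vqaddq (m, 10);          selects __arm_vqaddq_n_u8; lanes
                                             saturate to 255  */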
38608 
38609 #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38610   __typeof(p1) __p1 = (p1); \
38611   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38612   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38613   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38614   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38615   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38616   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38617   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38618 
38619 #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38620   __typeof(p1) __p1 = (p1); \
38621   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38622   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38623   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38624   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38625   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38626   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38627   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38628 
38629 #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38630   __typeof(p1) __p1 = (p1); \
38631   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38632   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38633   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38634   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38635   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38636   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38637   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38638   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38639   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38640   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38641   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38642   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38643   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38644 
38645 #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38646   __typeof(p1) __p1 = (p1); \
38647   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38648   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38649   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38650   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38651   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38652   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38653   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38654 
38655 #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38656   __typeof(p1) __p1 = (p1); \
38657   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38658   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38659   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38660   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38661   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38662   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38663   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38664 
38665 #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38666   __typeof(p1) __p1 = (p1); \
38667   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38668   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38669   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38670   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38671   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38672   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38673   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38674 
38675 #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38676   __typeof(p1) __p1 = (p1); \
38677   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38678   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38679   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38680   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38681   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38682   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38683   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38684 
38685 #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38686   __typeof(p1) __p1 = (p1); \
38687   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38688   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38689   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38690   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
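/* __arm_vminaq pairs an unsigned accumulator with a signed vector, hence only
   the mixed uint/int rows above.  Illustrative use:
     uint16x8_t acc;  int16x8_t v;
     acc = __arm_vminaq (acc, v);   selects __arm_vminaq_s16  */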
38691 
38692 #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38693   __typeof(p1) __p1 = (p1); \
38694   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38695   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38696   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38697   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38698   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38699   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38700   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38701 
38702 #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38703   __typeof(p1) __p1 = (p1); \
38704   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38705   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38706   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38707   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38708 
38709 #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38710   __typeof(p1) __p1 = (p1); \
38711   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38712   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38713   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38714   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38715   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38716   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38717   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38718   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38719   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38720   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38721   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38722   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38723   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
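/* For __arm_vhsubq (and __arm_vhaddq below) the second operand may be either
   a scalar, matched by the __ARM_mve_type_int_n rows and dispatched to the
   _n_ variants, or a vector of the same type as the first operand.
   Illustrative use:
     int32x4_t a, b;
     int32x4_t r0 = __arm_vhsubq (a, b);   selects __arm_vhsubq_s32
     int32x4_t r1 = __arm_vhsubq (a, 2);   selects __arm_vhsubq_n_s32  */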
38724 
38725 #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38726   __typeof(p1) __p1 = (p1); \
38727   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38728   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38729   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38730   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38731 
38732 #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38733   __typeof(p1) __p1 = (p1); \
38734   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38735   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38736   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38737   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38738 
38739 #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38740   __typeof(p1) __p1 = (p1); \
38741   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38742   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38743   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38744   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38745   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38746   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38747   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38748   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38749   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38750   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38751   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38752   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38753   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38754 
38755 #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38756   __typeof(p1) __p1 = (p1); \
38757   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38758   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38759   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38760   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38761   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38762   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38763   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38764 
38765 #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38766   __typeof(p1) __p1 = (p1); \
38767   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38768   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38769   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38770   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38771   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38772   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38773   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38774 
38775 #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38776   __typeof(p1) __p1 = (p1); \
38777   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38778   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38779   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38780   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38781   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38782   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38783   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38784 
38785 #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38786   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38787   int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38788   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38789   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38790   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38791   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38792   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
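/* __arm_vbrsrq dispatches on the vector operand only; the scalar second
   argument is forwarded unchanged to the selected _n_ intrinsic.
   Illustrative use:
     uint32x4_t v;  int32_t n = 5;
     uint32x4_t r = __arm_vbrsrq (v, n);   selects __arm_vbrsrq_n_u32  */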
38793 
38794 #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38795   __typeof(p1) __p1 = (p1); \
38796   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38797   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
38798   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
38799   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
38800   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
38801   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38802   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38803   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38804   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38805   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38806   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
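/* The immediate forms of __arm_vbicq exist only for 16-bit and 32-bit element
   types and pass the scalar through __ARM_mve_coerce1; every other pairing
   takes two vectors of the same type.  Illustrative use:
     uint32x4_t v;
     uint32x4_t r = __arm_vbicq (v, 0xff);   selects __arm_vbicq_n_u32  */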
38807 
38808 #define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38809   __typeof(p1) __p1 = (p1); \
38810   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38811   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38812   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38813   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38814   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38815   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38816   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38817   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
38818   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
38819   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
38820   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
38821   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
38822   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)));})
38823 
38824 #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38825   __typeof(p1) __p1 = (p1); \
38826   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38827   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38828   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38829   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38830   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38831   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38832   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38833 
38834 #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38835   __typeof(p1) __p1 = (p1); \
38836   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38837   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38838   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38839   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38840   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38841   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38842   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38843 
38844 #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38845   __typeof(p1) __p1 = (p1); \
38846   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38847   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38848   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38849   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38850   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38851   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38852   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38853   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38854   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38855   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38856   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38857   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38858   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
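/* The comparison macros return an mve_pred16_t predicate; as with the
   arithmetic macros, a scalar second operand selects the _n_ variant.
   Illustrative use:
     int16x8_t a, b;
     mve_pred16_t pr0 = __arm_vcmpeqq (a, b);   selects __arm_vcmpeqq_s16
     mve_pred16_t pr1 = __arm_vcmpeqq (a, 0);   selects __arm_vcmpeqq_n_s16  */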
38859 
38860 #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38861   __typeof(p1) __p1 = (p1); \
38862   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38863   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38864   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38865   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38866   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38867 
38868 #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38869   __typeof(p1) __p1 = (p1); \
38870   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38871   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38872   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38873   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38874   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
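/* __arm_vqmovntq and __arm_vqmovnbq saturate and narrow the wide second
   operand into the top (odd) or bottom (even) lanes of the narrower first
   operand.  Illustrative use:
     int8x16_t d;  int16x8_t w;
     d = __arm_vqmovnbq (d, w);   selects __arm_vqmovnbq_s16  */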
38875 
38876 #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38877   __typeof(p1) __p1 = (p1); \
38878   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38879   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38880   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
38881 
38882 #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38883   __typeof(p1) __p1 = (p1); \
38884   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38885   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38886   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
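/* The polynomial widening multiplies are defined only for the 8-bit and
   16-bit unsigned containers (the _p8 and _p16 variants).  Illustrative use:
     uint8x16_t a, b;
     uint16x8_t r = __arm_vmullbq_poly (a, b);   selects __arm_vmullbq_poly_p8  */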
38887 
38888 #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38889   __typeof(p1) __p1 = (p1); \
38890   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38891   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38892   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38893   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38894   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38895 
38896 #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38897   __typeof(p1) __p1 = (p1); \
38898   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38899   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38900   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38901   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38902   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38903 
38904 #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38905   __typeof(p1) __p1 = (p1); \
38906   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38907   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38908   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38909 
38910 #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38911   __typeof(p1) __p1 = (p1); \
38912   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38913   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38914   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38915 
38916 #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38917   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38918   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38919   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38920   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38921   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38922 
38923 #define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38924   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38925   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38926   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38927   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38928   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38929 
38930 #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38931   __typeof(p1) __p1 = (p1); \
38932   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38933   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38934   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38935 
38936 #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38937   __typeof(p1) __p1 = (p1); \
38938   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38939   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38940   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38941   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38942   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
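/* __arm_vqdmulltq and __arm_vqdmullbq are defined only for signed 16-bit and
   32-bit elements and produce a double-width result.  Illustrative use:
     int16x8_t a, b;
     int32x4_t r = __arm_vqdmulltq (a, b);   selects __arm_vqdmulltq_s16  */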
38943 
38944 #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38945   __typeof(p1) __p1 = (p1); \
38946   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38947   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38948   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38949   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38950   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38951 
38952 #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38953   __typeof(p1) __p1 = (p1); \
38954   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38955   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38956   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38957   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38958   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38959   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38960   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38961 
38962 #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38963   __typeof(p1) __p1 = (p1); \
38964   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38965   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38966   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38967   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38968   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38969   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38970   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38971 
38972 #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38973   __typeof(p1) __p1 = (p1); \
38974   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38975   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38976   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38977   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38978   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38979   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38980   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38981 
38982 #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38983   __typeof(p1) __p1 = (p1); \
38984   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38985   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38986   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38987   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38988   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38989   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38990   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38991 
38992 #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
38993   __typeof(p1) __p1 = (p1); \
38994   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38995   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
38996   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
38997   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
38998   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
38999   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
39000   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
39001   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
39002   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39003   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39004   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39005   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39006   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39007 
39008 #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39009   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39010   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39011   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39012   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39013   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39014   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39015   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
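/* __arm_vshlcq dispatches on the vector alone and forwards the carry pointer
   and shift count unchanged.  Illustrative use:
     uint32_t carry = 0;
     int32x4_t v;
     v = __arm_vshlcq (v, &carry, 1);   selects __arm_vshlcq_s32  */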
39016 
39017 #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39018   __typeof(p1) __p1 = (p1); \
39019   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39020   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39021   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39022   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39023   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39024   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39025   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39026   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
39027   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
39028   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
39029   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
39030   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
39031   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
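/* The predicated (_m) comparison macros take a trailing mve_pred16_t mask
   which is forwarded unchanged to the selected intrinsic.  Illustrative use:
     uint8x16_t a, b;  mve_pred16_t m;
     mve_pred16_t r = __arm_vcmpeqq_m (a, b, m);   selects __arm_vcmpeqq_m_u8  */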
39032 
39033 #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39034   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39035   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39036   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39037   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39038   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39039 
39040 #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39041   __typeof(p1) __p1 = (p1); \
39042   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39043   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39044   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39045   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39046   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39047 
39048 #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39049   __typeof(p1) __p1 = (p1); \
39050   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39051   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39052   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39053 
39054 #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39055   __typeof(p1) __p1 = (p1); \
39056   __typeof(p2) __p2 = (p2); \
39057   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39058   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39059   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39060   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
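/* The three-operand macros, such as __arm_vqrdmlsdhq, extend the same scheme
   by indexing the _Generic selector with all three type ids.  Illustrative use:
     int32x4_t acc, a, b;
     acc = __arm_vqrdmlsdhq (acc, a, b);   selects __arm_vqrdmlsdhq_s32  */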
39061 
39062 #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39063   __typeof(p1) __p1 = (p1); \
39064   __typeof(p2) __p2 = (p2); \
39065   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39066   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39067   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39068   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39069 
39070 #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39071   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39072   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39073   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39074   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39075   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39076   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39077   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39078 
39079 #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39080   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39081   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39082   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39083   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39084   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39085   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39086   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39087 
39088 #define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39089   __typeof(p1) __p1 = (p1); \
39090   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39091   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39092   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39093   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39094   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39095   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39096   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39097 
39098 #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39099   __typeof(p1) __p1 = (p1); \
39100   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39101   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
39102   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
39103   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
39104   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
39105   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
39106   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
39107 
39108 #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39109   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39110   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39111   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39112   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39113   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39114   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39115   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39116 
39117 #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39118   __typeof(p1) __p1 = (p1); \
39119   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39120   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39121   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39122   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39123   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39124   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39125   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39126 
39127 #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39128   __typeof(p1) __p1 = (p1); \
39129   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39130   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39131   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39132   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39133   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39134   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39135   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39136 
39137 #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39138   __typeof(p1) __p1 = (p1); \
39139   __typeof(p2) __p2 = (p2); \
39140   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39141   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39142   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39143   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39144 
39145 #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39146   __typeof(p1) __p1 = (p1); \
39147   __typeof(p2) __p2 = (p2); \
39148   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39149   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39150   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39151   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39152 
39153 #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39154   __typeof(p1) __p1 = (p1); \
39155   __typeof(p2) __p2 = (p2); \
39156   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39157   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39158   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39159   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39160 
39161 #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39162   __typeof(p1) __p1 = (p1); \
39163   __typeof(p2) __p2 = (p2); \
39164   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39165   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39166   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39167   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39168 
39169 #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39170   __typeof(p1) __p1 = (p1); \
39171   __typeof(p2) __p2 = (p2); \
39172   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39173   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39174   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39175   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39176 
39177 #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39178   __typeof(p1) __p1 = (p1); \
39179   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39180   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39181   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39182   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39183 
39184 #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39185   __typeof(p1) __p1 = (p1); \
39186   __typeof(p2) __p2 = (p2); \
39187   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39188   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39189   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39190   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39191 
39192 #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39193   __typeof(p1) __p1 = (p1); \
39194   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39195   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39196   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39197   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39198 
39199 #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39200   __typeof(p1) __p1 = (p1); \
39201   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39202   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39203   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39204   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39205 
39206 #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39207   __typeof(p1) __p1 = (p1); \
39208   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39209   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39210   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39211   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39212   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39213   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39214   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39215 
39216 #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39217   __typeof(p1) __p1 = (p1); \
39218   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39219   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39220   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39221   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39222   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
39223   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
39224   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
39225 
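/* Illustrative usage sketch, not part of the original header: the type
   of p1 distinguishes vector-vector compares from vector-scalar
   (__ARM_mve_type_int_n) compares, so the same spelling covers both.
   The helper name is hypothetical.

     #include <arm_mve.h>

     // Predicated "greater than or equal" against a scalar threshold;
     // lanes that are inactive in p give a 0 bit in the result.
     mve_pred16_t
     ge_threshold_s32 (int32x4_t a, int32_t threshold, mve_pred16_t p)
     {
       return __arm_vcmpgeq_m (a, threshold, p);  // -> __arm_vcmpgeq_m_n_s32
     }  */
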
39226 #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39227   __typeof(p1) __p1 = (p1); \
39228   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39229   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
39230   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
39231   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
39232   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
39233   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
39234   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2));})
39235 
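/* Illustrative usage sketch, not part of the original header: note that
   this macro casts __p1 directly to the element type instead of going
   through __ARM_mve_coerce, so any integer scalar is accepted.  The
   helper name is hypothetical.

     #include <arm_mve.h>

     // Broadcast the scalar v into the lanes selected by p, keeping the
     // other lanes from inactive.
     uint8x16_t
     splat_masked_u8 (uint8x16_t inactive, uint8_t v, mve_pred16_t p)
     {
       return __arm_vdupq_m (inactive, v, p);  // -> __arm_vdupq_m_n_u8
     }  */
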
39236 #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39237   __typeof(p1) __p1 = (p1); \
39238   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39239   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39240   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39241   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39242 
39243 #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39244   __typeof(p1) __p1 = (p1); \
39245   __typeof(p2) __p2 = (p2); \
39246   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39247   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39248   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39249   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
39250   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
39251   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
39252   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
39253 
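/* Illustrative usage sketch, not part of the original header: vmlaq
   always takes a scalar multiplier (the __ARM_mve_type_int_n case), so
   every selection above resolves to a _n_ variant.  The helper name is
   hypothetical.

     #include <arm_mve.h>

     // acc[i] += x[i] * scale for every lane (VMLA vector-by-scalar).
     int16x8_t
     scale_accumulate_s16 (int16x8_t acc, int16x8_t x, int16_t scale)
     {
       return __arm_vmlaq (acc, x, scale);  // -> __arm_vmlaq_n_s16
     }  */
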
39254 #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39255   __typeof(p1) __p1 = (p1); \
39256   __typeof(p2) __p2 = (p2); \
39257   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39258   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39259   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39260   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
39261   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
39262   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
39263   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
39264 
39265 #define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39266   __typeof(p1) __p1 = (p1); \
39267   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39268   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39269   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39270   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39271 
39272 #define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39273   __typeof(p1) __p1 = (p1); \
39274   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39275   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39276   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39277   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39278   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
39279   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39280   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39281   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39282   int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
39283 
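/* Illustrative usage sketch, not part of the original header: vpselq is
   one of the few operations here that also dispatches on the 64-bit
   element types (s64/u64).  The helper name is hypothetical.

     #include <arm_mve.h>

     // Per-lane select: take a[i] where the predicate bit is set,
     // otherwise b[i].
     uint32x4_t
     blend_u32 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
     {
       return __arm_vpselq (a, b, p);  // -> __arm_vpselq_u32
     }  */
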
39284 #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39285   __typeof(p1) __p1 = (p1); \
39286   __typeof(p2) __p2 = (p2); \
39287   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39288   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39289   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39290   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39291 
39292 #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39293   __typeof(p1) __p1 = (p1); \
39294   __typeof(p2) __p2 = (p2); \
39295   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39296   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39297   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39298   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39299 
39300 #define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39301   __typeof(p1) __p1 = (p1); \
39302   __typeof(p2) __p2 = (p2); \
39303   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39304   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39305   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39306   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39307 
39308 #define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39309   __typeof(p1) __p1 = (p1); \
39310   __typeof(p2) __p2 = (p2); \
39311   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39312   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39313   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39314   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39315 
39316 #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39317   __typeof(p1) __p1 = (p1); \
39318   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39319   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39320   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39321   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39322 
39323 #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39324   __typeof(p1) __p1 = (p1); \
39325   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39326   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39327   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39328   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39329   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39330 
39331 #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39332   __typeof(p1) __p1 = (p1); \
39333   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39334   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39335   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39336   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39337   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39338 
39339 #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39340   __typeof(p1) __p1 = (p1); \
39341   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39342   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39343   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39344   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39345   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39346 
39347 #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39348   __typeof(p1) __p1 = (p1); \
39349   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39350   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39351   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39352   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39353   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39354 
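/* Illustrative usage sketch, not part of the original header: for the
   narrowing shifts the destination operand is the narrower type and the
   shifted operand the wider one, which is why the _Generic keys pair
   e.g. int8x16_t with int16x8_t; p2 must be a compile-time shift
   immediate.  The helper name is hypothetical.

     #include <arm_mve.h>

     // Shift each 16-bit lane of wide right by 3, truncate to 8 bits and
     // write the results into the even byte lanes of dst; the odd byte
     // lanes of dst are left unchanged.
     int8x16_t
     narrow_bottom_s16 (int8x16_t dst, int16x8_t wide)
     {
       return __arm_vshrnbq (dst, wide, 3);  // -> __arm_vshrnbq_n_s16
     }  */
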
39355 #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39356   __typeof(p1) __p1 = (p1); \
39357   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39358   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39359   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39360   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39361   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39362 
39363 #define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39364   __typeof(p1) __p1 = (p1); \
39365   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39366   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39367   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39368   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39369   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39370 
39371 #define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39372   __typeof(p1) __p1 = (p1); \
39373   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39374   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39375   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39376 
39377 #define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39378   __typeof(p1) __p1 = (p1); \
39379   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39380   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39381   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
39382 
39383 #define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39384   __typeof(p1) __p1 = (p1); \
39385   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39386   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39387   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39388   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39389   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39390 
39391 #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39392   __typeof(p1) __p1 = (p1); \
39393   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39394   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39395   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39396 
39397 #define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39398   __typeof(p1) __p1 = (p1); \
39399   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39400   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39401   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39402   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39403   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39404 
39405 #define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39406   __typeof(p1) __p1 = (p1); \
39407   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39408   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39409   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39410   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39411   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39412 
39413 #define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39414   __typeof(p1) __p1 = (p1); \
39415   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39416   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39417   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39418 
39419 #define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39420   __typeof(p1) __p1 = (p1); \
39421   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39422   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39423   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39424   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39425   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39426 
39427 #define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39428   __typeof(p1) __p1 = (p1); \
39429   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39430   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39431   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39432   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39433   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39434 
39435 #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39436   __typeof(p1) __p1 = (p1); \
39437   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39438   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39439   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39440   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39441   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39442 
39443 #define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39444   __typeof(p1) __p1 = (p1); \
39445   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39446   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39447   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39448 
39449 #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39450   __typeof(p1) __p1 = (p1); \
39451   __typeof(p2) __p2 = (p2); \
39452   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39453   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
39454   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
39455   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
39456   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
39457   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
39458   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
39459   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39460   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39461   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39462   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39463   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39464   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39465 
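/* Illustrative usage sketch, not part of the original header: the type
   of p2 decides between the vector-vector form (_m_sXX/_m_uXX) and the
   vector-scalar form (_m_n_sXX/_m_n_uXX), so one spelling handles both.
   The helper names are hypothetical.

     #include <arm_mve.h>

     // Predicated subtract, vector - vector; inactive lanes come from
     // inactive.
     uint16x8_t
     sub_masked_u16 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b,
                     mve_pred16_t p)
     {
       return __arm_vsubq_m (inactive, a, b, p);     // -> __arm_vsubq_m_u16
     }

     // Predicated subtract, vector - scalar.
     uint16x8_t
     sub_bias_u16 (uint16x8_t inactive, uint16x8_t a, uint16_t bias,
                   mve_pred16_t p)
     {
       return __arm_vsubq_m (inactive, a, bias, p);  // -> __arm_vsubq_m_n_u16
     }  */
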
39466 #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39467   __typeof(p1) __p1 = (p1); \
39468   __typeof(p2) __p2 = (p2); \
39469   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39470   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39471   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39472   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39473   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39474   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39475   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39476 
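/* Illustrative usage sketch, not part of the original header: unlike
   most entries here, the selection only looks at p1 and p2 because p0
   is always a plain 32-bit accumulator.  The helper name is
   hypothetical.

     #include <arm_mve.h>

     // acc += sum over the active lanes of |a[i] - b[i]|.
     uint32_t
     sad_step_u8 (uint32_t acc, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
     {
       return __arm_vabavq_p (acc, a, b, p);  // -> __arm_vabavq_p_u8
     }  */
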
39477 #define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39478   __typeof(p1) __p1 = (p1); \
39479   __typeof(p2) __p2 = (p2); \
39480   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39481   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39482   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39483   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39484   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39485   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39486   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39487 
39488 #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39489   __typeof(p1) __p1 = (p1); \
39490   __typeof(p2) __p2 = (p2); \
39491   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39492   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39493   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39494   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39495   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39496   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39497   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39498 
39499 #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39500   __typeof(p1) __p1 = (p1); \
39501   __typeof(p2) __p2 = (p2); \
39502   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39503   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39504   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39505   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39506   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39507   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39508   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39509 
39510 #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39511   __typeof(p1) __p1 = (p1); \
39512   __typeof(p2) __p2 = (p2); \
39513   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39514   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \
39515   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \
39516   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \
39517   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \
39518   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \
39519   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));})
39520 
39521 #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39522   __typeof(p1) __p1 = (p1); \
39523   __typeof(p2) __p2 = (p2); \
39524   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39525   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39526   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39527   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39528   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39529   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39530   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39531 
39532 #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39533   __typeof(p1) __p1 = (p1); \
39534   __typeof(p2) __p2 = (p2); \
39535   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39536   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39537   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39538   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39539   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39540   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39541   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39542 
39543 #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39544   __typeof(p1) __p1 = (p1); \
39545   __typeof(p2) __p2 = (p2); \
39546   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39547   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39548   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39549   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39550   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39551   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39552   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39553 
39554 #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39555   __typeof(p1) __p1 = (p1); \
39556   __typeof(p2) __p2 = (p2); \
39557   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39558   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39559   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39560   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39561   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39562   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39563   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39564 
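/* Illustrative usage sketch, not part of the original header: the
   accumulator argument matches __ARM_mve_type_int_n, so the vector
   operand types alone pick the signed or unsigned variant.  The helper
   name is hypothetical.

     #include <arm_mve.h>

     // acc += dot product of the active lanes of a and b.
     int32_t
     dot_step_s16 (int32_t acc, int16x8_t a, int16x8_t b, mve_pred16_t p)
     {
       return __arm_vmladavaq_p (acc, a, b, p);  // -> __arm_vmladavaq_p_s16
     }  */
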
39565 #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39566   __typeof(p1) __p1 = (p1); \
39567   __typeof(p2) __p2 = (p2); \
39568   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39569   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39570   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39571   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39572   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39573   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39574   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39575 
39576 #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39577   __typeof(p1) __p1 = (p1); \
39578   __typeof(p2) __p2 = (p2); \
39579   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39580   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39581   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39582   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39583   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39584   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39585   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39586 
39587 #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39588   __typeof(p1) __p1 = (p1); \
39589   __typeof(p2) __p2 = (p2); \
39590   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39591   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int), p3), \
39592   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int), p3), \
39593   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int), p3), \
39594   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int), p3), \
39595   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int), p3), \
39596   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int), p3), \
39597   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39598   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39599   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39600   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39601   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39602   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39603 
39604 #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39605   __typeof(p1) __p1 = (p1); \
39606   __typeof(p2) __p2 = (p2); \
39607   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39608   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
39609   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
39610   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
39611   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
39612   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
39613   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
39614   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39615   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39616   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39617   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39618   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39619   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39620 
39621 #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
39622   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39623   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
39624   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
39625 
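/* Illustrative usage sketch, not part of the original header: only the
   value vector (p2) takes part in the selection; p0 is the vector of
   base addresses and p1 an immediate byte offset added to each of them.
   The helper name is hypothetical.

     #include <arm_mve.h>

     // Scatter-store: write val[i] to the address addr[i] + 0.
     void
     scatter_words_u32 (uint32x4_t addr, uint32x4_t val)
     {
       __arm_vstrwq_scatter_base (addr, 0, val);  // -> ..._base_u32
     }  */
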
39626 #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39627   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39628   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39629   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39630   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39631   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39632   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39633   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39634 
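/* Illustrative usage sketch, not part of the original header: for the
   byte gathers the width of the offset vector picks the result width,
   e.g. uint16x8_t offsets load eight bytes and widen each to 16 bits.
   The helper name is hypothetical.

     #include <arm_mve.h>

     // Gather eight bytes from base[off[i]] and sign-extend each one to
     // 16 bits.
     int16x8_t
     gather_bytes_s16 (int8_t *base, uint16x8_t off)
     {
       return __arm_vldrbq_gather_offset (base, off);  // -> ..._offset_s16
     }  */
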
39635 #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
39636   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39637   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
39638   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39639 
39640 #define __arm_vld1q(p0) (\
39641   _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
39642   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
39643   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
39644   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
39645   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
39646   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
39647   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
39648 
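/* The vldrhq gather forms below follow one pattern: the plain versions take
   a halfword base and an unsigned offset vector, the _shifted_ versions
   scale the offsets by the element size, and the _z versions take an extra
   predicate and are the zeroing-predicated variants.  */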
39649 #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39650   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39651   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39652   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39653   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39654   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39655 
39656 #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39657   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39658   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39659   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39660   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39661   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39662 
39663 #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39664   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39665   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39666   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39667   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39668   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39669 
39670 #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39671   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39672   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39673   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39674   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39675   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39676 
39677 #define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39678   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39679   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
39680   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
39681 
39682 #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39683   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39684   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
39685   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
39686 
39687 #define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39688   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39689   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
39690   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
39691 
39692 #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39693   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39694   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
39695   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
39696 
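/* Polymorphic contiguous stores: __arm_vst1q picks the store variant from
   the pointer/vector pair, and __arm_vst1q_p is the predicated form with a
   trailing mve_pred16_t.  Illustrative use, with hypothetical names:
     uint16_t *dst; uint16x8_t v; mve_pred16_t p;
     __arm_vst1q_p (dst, v, p);
   resolves to __arm_vst1q_p_u16 (dst, v, p).  */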
39697 #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39698   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39699   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
39700   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
39701   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
39702   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39703   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39704   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39705 
39706 #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39707   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39708   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39709   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39710   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39711   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39712   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39713   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39714 
39715 #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39716   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39717   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
39718   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
39719   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
39720   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
39721   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
39722   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
39723 
39724 #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39725   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39726   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
39727   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
39728   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39729   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39730 
39731 #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39732   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39733   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39734   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39735   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39736   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39737 
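/* Halfword scatter stores: dispatch is keyed on the base pointer type, the
   uint16x8_t or uint32x4_t offset vector and the value vector, so
   truncating stores (for instance int32x4_t values through an int16_t *
   base) are selected automatically; the _p forms take an extra predicate.  */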
39738 #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39739   __typeof(p2) __p2 = (p2); \
39740   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39741   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39742   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39743   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39744   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39745 
39746 #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39747   __typeof(p2) __p2 = (p2); \
39748   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39749   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39750   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
39751   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
39752   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
39753 
39754 #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39755   __typeof(p2) __p2 = (p2); \
39756   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39757   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39758   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39759   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39760   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39761 
39762 #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39763   __typeof(p2) __p2 = (p2); \
39764   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39765   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39766   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
39767   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
39768   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
39769 
39770 
39771 #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39772   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39773   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
39774   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39775 
39776 #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39777   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39778   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39779   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39780 
39781 #define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
39782   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39783   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
39784   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
39785 
39786 #define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
39787   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39788   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
39789   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
39790 
39823 #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39824   __typeof(p2) __p2 = (p2); \
39825   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
39826   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
39827   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
39828 
39829 #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39830   __typeof(p2) __p2 = (p2); \
39831   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
39832   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
39833   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39834 
39835 #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39836   __typeof(p2) __p2 = (p2); \
39837   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
39838   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
39839   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
39840 
39841 #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39842   __typeof(p2) __p2 = (p2); \
39843   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
39844   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
39845   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39846 
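/* __arm_vuninitializedq maps a vector argument to the matching
   __arm_vuninitializedq_<suffix> () call; the argument only drives the
   _Generic type selection and its value is not otherwise used.  */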
39847 #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
39848   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39849   int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
39850   int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
39851   int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
39852   int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
39853   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
39854   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
39855   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
39856   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
39857 
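/* The __arm_vreinterpretq_* family below provides bit-pattern preserving
   casts between the eight 128-bit integer vector types: each macro accepts
   any vector type other than its destination type and dispatches to the
   corresponding typed reinterpret intrinsic.  For example, a uint32x4_t
   argument to __arm_vreinterpretq_s16 resolves to
   __arm_vreinterpretq_s16_u32.  */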
39858 #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
39859   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39860   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39861   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39862   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39863   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39864   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39865   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39866   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39867 
39868 #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
39869   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39870   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39871   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39872   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39873   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39874   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39875   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39876   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39877 
39878 #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
39879   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39880   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39881   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39882   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39883   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39884   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39885   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39886   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39887 
39888 #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
39889   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39890   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39891   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39892   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39893   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39894   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39895   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39896   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39897 
39898 #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
39899   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39900   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39901   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39902   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39903   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39904   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39905   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39906   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39907 
39908 #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
39909   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39910   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39911   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39912   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39913   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39914   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39915   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39916   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39917 
39918 #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
39919   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39920   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39921   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39922   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39923   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
39924   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39925   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39926   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));})
39927 
39928 #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
39929   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39930   int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
39931   int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
39932   int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
39933   int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
39934   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
39935   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
39936   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
39937 
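/* The _x ("don't care") predicated forms below take a final mve_pred16_t
   and leave the false-predicated lanes undefined.  Where the second operand
   may be either a vector or a scalar, the __ARM_mve_type_int_n cases route
   a scalar to the corresponding _n_ variant; for example, adding an int
   constant to an int16x8_t through __arm_vaddq_x selects
   __arm_vaddq_x_n_s16.  */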
39938 #define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
39939   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
39940   int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
39941   int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
39942   int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
39943 
39944 #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39945   __typeof(p2) __p2 = (p2); \
39946   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39947   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39948   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39949   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39950   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
39951   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
39952   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
39953   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39954   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39955   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
39956   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
39957   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
39958   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
39959 
39960 #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39961   __typeof(p2) __p2 = (p2); \
39962   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39963   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39964   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39965   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39966   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39967   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39968   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39969 
39970 #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39971   __typeof(p2) __p2 = (p2); \
39972   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39973   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39974   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39975   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39976   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39977   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39978   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39979 
39980 #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
39981   __typeof(p2) __p2 = (p2); \
39982   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39983   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39984   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39985   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39986   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39987   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39988   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39989 
39990 #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
39991   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
39992   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
39993   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
39994   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
39995   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
39996 
39997 #define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
39998   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
39999   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40000   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40001   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40002   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
40003 
40004 #define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40005   __typeof(p2) __p2 = (p2); \
40006   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40007   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40008   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40009   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40010   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40011   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40012   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40013 
40014 #define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40015   __typeof(p2) __p2 = (p2); \
40016   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40017   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40018   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40019   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40020   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40021   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40022   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40023 
40024 #define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40025   __typeof(p2) __p2 = (p2); \
40026   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40027   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40028   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
40029 
40030 #define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40031   __typeof(p2) __p2 = (p2); \
40032   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40033   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40034   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40035   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40036   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40037   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40038   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40039 
40040 #define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40041   __typeof(p2) __p2 = (p2); \
40042   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40043   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40044   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
40045 
40046 #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40047   __typeof(p2) __p2 = (p2); \
40048   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40049   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40050   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40051   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40052   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40053   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40054   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40055   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40056   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40057   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
40058   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40059   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40060   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
40061 
40062 #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40063   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40064   int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40065   int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40066   int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
40067 
40068 #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40069   __typeof(p2) __p2 = (p2); \
40070   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40071   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40072   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40073   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40074   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40075   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40076   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40077 
40078 #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40079   __typeof(p2) __p2 = (p2); \
40080   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40081   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40082   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40083   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40084   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40085   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40086   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40087 
40088 #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40089   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40090   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40091   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40092   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40093   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
40094 
40095 #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40096   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40097   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40098   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40099   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
40100   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40101   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
40102   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40103 
40104 #define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40105   __typeof(p2) __p2 = (p2); \
40106   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40107   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40108   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40109   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40110   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40111   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40112   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40113 
40114 #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40115   __typeof(p2) __p2 = (p2); \
40116   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40117   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40118   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40119   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40120   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40121   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40122   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40123 
40124 #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40125   __typeof(p2) __p2 = (p2); \
40126   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40127   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8   (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40128   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16  (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40129   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32  (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40130   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40131   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40132   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40133 
40134 #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40135   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40136   int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40137   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40138   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40139   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40140   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40141   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40142 
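/* __arm_vld1q_z is the zeroing-predicated counterpart of __arm_vld1q, while
   __arm_vld2q and __arm_vld4q perform de-interleaving structure loads and
   return the <type>x2_t or <type>x4_t tuple matching the pointer's element
   type.  */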
40143 #define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40144   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \
40145   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \
40146   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
40147   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \
40148   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \
40149   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1)))
40150 
40151 #define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40152   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
40153   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
40154   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
40155   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
40156   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
40157   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
40158 
40159 
40160 #define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40161   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
40162   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
40163   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
40164   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
40165   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
40166   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *))))
40167 
40168 #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
40169   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
40170   int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
40171   int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
40172   int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
40173   int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
40174   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
40175   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
40176   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
40177   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));})
40178 
40179 #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40180   __typeof(p1) __p1 = (p1); \
40181   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40182   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
40183   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40184   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40185   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
40186   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40187   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40188   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
40189   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
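
/* A sketch of the polymorphic lane accessors above, assuming the
   user-namespace aliases vgetq_lane, vsetq_lane and vdupq_n_s32 are in
   effect.  The lane index must be a constant expression in range for the
   element count of the vector type:

     int32x4_t v = vdupq_n_s32 (7);
     int32_t x = vgetq_lane (v, 0);       // dispatches to __arm_vgetq_lane_s32
     v = vsetq_lane (42, v, 3);           // the scalar 42 is coerced to int32_t
 */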
40190 
40191 #endif /* MVE Integer.  */
40192 
40193 #define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40194   __typeof(p1) __p1 = (p1); \
40195   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40196   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40197   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40198   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40199   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
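
/* The narrowing shifts only write half of the destination lanes: the
   "t" (top) forms above fill the odd-numbered narrow lanes, while the
   matching "b" (bottom) forms fill the even-numbered ones, so a pair of
   calls packs two wide vectors into one narrow result.  A sketch,
   assuming the user-namespace aliases vshrnbq, vshrntq and vdupq_n_s8
   are in effect and lo16, hi16 are int16x8_t values:

     int8x16_t packed = vdupq_n_s8 (0);
     packed = vshrnbq (packed, lo16, 4);  // even lanes from lo16 >> 4
     packed = vshrntq (packed, hi16, 4);  // odd lanes from hi16 >> 4
 */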
40200 
40201 
40202 #define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40203   __typeof(p1) __p1 = (p1); \
40204   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40205   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40206   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40207   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40208   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40209 
40210 
40211 #define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40212   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40213   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40214   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40215   int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
40216   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40217   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
40218   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40219 
40220 #define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40221   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40222   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40223   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
40224 
40225 #define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40226   __typeof(p2) __p2 = (p2); \
40227   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40228   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40229   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40230   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40231   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40232   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40233   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40234 
40235 #define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40236   __typeof(p2) __p2 = (p2); \
40237   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40238   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40239   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40240   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40241   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40242   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40243   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40244 
40245 #define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40246   __typeof(p2) __p2 = (p2); \
40247   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40248   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40249   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40250   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40251   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40252   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40253   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40254 
40255 #define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40256   __typeof(p2) __p2 = (p2); \
40257   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40258   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40259   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40260   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40261   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40262   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40263   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40264 
40265 #define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40266   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40267   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40268   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40269   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40270   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40271   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40272   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40273 
40274 #define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40275   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40276   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40277   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40278   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40279   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40280 
40281 #define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40282   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40283   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40284   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40285   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40286   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40287 
40288 #define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40289   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40290   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40291   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40292   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40293   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40294   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40295   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40296 
40297 #define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40298   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40299   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
40300   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40301 
40302 #define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40303   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40304   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
40305   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40306 
40307 #define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40308   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40309   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
40310   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40311 
40312 #define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40313   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40314   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
40315   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40316 
40317 #define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40318   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40319   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
40320   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40321 
40322 #define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40323   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40324   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
40325   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40326 
40327 #define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40328   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40329   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
40330   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40331 
40332 #define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40333   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40334   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
40335   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40336 
40337 #define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40338   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40339   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
40340   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40341 
40342 #define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40343   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40344   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
40345   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40346 
40347 #define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40348   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40349   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
40350   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40351 
40352 #define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40353   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40354   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
40355   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
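
/* The v[id]dupq_x and v[id]wdupq_x forms above dispatch on their first
   argument: a plain integer start value selects the immediate (_n_)
   variant, while a uint32_t pointer selects the write-back (_wb_)
   variant, which also updates the start value through the pointer.  A
   sketch, assuming the user-namespace alias vidupq_x_u8 is in effect,
   p is an mve_pred16_t and the step is one of 1, 2, 4 or 8:

     uint32_t start = 0;
     uint8x16_t a = vidupq_x_u8 (start, 1, p);    // {0, 1, 2, ...}; start unchanged
     uint8x16_t b = vidupq_x_u8 (&start, 1, p);   // same values; start is advanced
 */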
40356 
40357 #define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40358   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40359   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40360   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40361   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40362   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40363   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40364   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40365 
40366 #define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40367   __typeof(p2) __p2 = (p2); \
40368   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40369   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40370   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40371   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40372   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40373   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40374   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40375   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40376   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40377   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40378   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40379   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40380   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
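
/* vhaddq_x is a halving addition, (a + b) >> 1 computed without losing
   the intermediate carry, with "_x" (don't-care) predication: lanes that
   the predicate switches off have unspecified values.  The second
   argument may be a vector or a scalar, which selects the _n_ variants
   above.  A sketch, assuming the user-namespace alias is in effect and
   a, b are int16x8_t values with an mve_pred16_t p:

     int16x8_t r1 = vhaddq_x (a, b, p);   // vector + vector
     int16x8_t r2 = vhaddq_x (a, 3, p);   // vector + scalar, __arm_vhaddq_x_n_s16
 */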
40381 
40382 #define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40383   __typeof(p2) __p2 = (p2); \
40384   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40385   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40386   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40387   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40388 
40389 #define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40390   __typeof(p2) __p2 = (p2); \
40391   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40392   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40393   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40394   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40395 
40396 #define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40397   __typeof(p2) __p2 = (p2); \
40398   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40399   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40400   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40401   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40402   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40403   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40404   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40405   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40406   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40407   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40408   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40409   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40410   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40411 
40412 #define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40413   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40414   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40415   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40416   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
40417 
40418 #define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40419   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40420   int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40421   int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40422   int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
40423   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40424   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
40425   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40426 
40433 #define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
40434   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
40435   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
40436   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
40437 
40438 #define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
40439   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
40440   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
40441   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
40442 
40443 #define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40444   int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
40445   int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
40446 
40447 #define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40448   int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
40449   int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
40450 
40451 #define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40452   int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
40453   int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
40454 
40455 #define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40456   int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
40457   int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
40458 
40459 #define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40460   __typeof(p1) __p1 = (p1); \
40461   __typeof(p2) __p2 = (p2); \
40462   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40463   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40464   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40465 
40466 #define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40467   __typeof(p1) __p1 = (p1); \
40468   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40469   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40470   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40471 
40472 #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40473   __typeof(p1) __p1 = (p1); \
40474   __typeof(p2) __p2 = (p2); \
40475   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40476   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40477   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40478 
40479 #define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40480   __typeof(p1) __p1 = (p1); \
40481   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40482   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40483   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
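
/* vadciq and vadcq add 32-bit elements with the carry chained through
   the vector, which is what makes multi-word additions possible: vadciq
   starts from a zero carry-in, vadcq takes its carry-in from *carry, and
   both write the final carry-out back through the pointer.  A sketch of
   a 256-bit addition, assuming the user-namespace aliases are in effect
   and lo0, lo1, hi0, hi1 are uint32x4_t halves of the two operands:

     unsigned carry;
     uint32x4_t lo = vadciq (lo0, lo1, &carry);   // low 128 bits, carry-in forced to 0
     uint32x4_t hi = vadcq (hi0, hi1, &carry);    // high 128 bits, consumes the carry
 */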
40484 
40485 #define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40486   __typeof(p1) __p1 = (p1); \
40487   __typeof(p2) __p2 = (p2); \
40488   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40489   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40490   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40491 
40492 #define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40493   __typeof(p1) __p1 = (p1); \
40494   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40495   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40496   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40497 
40498 #define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40499   __typeof(p1) __p1 = (p1); \
40500   __typeof(p2) __p2 = (p2); \
40501   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40502   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40503   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40504 
40505 #define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40506   __typeof(p1) __p1 = (p1); \
40507   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40508   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40509   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40510 
40511 #define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
40512   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
40513   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40514   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40515   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
40516   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40517   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40518   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
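
/* vldrbq_gather_offset_z gathers one byte per lane from base[offset[i]]
   and widens it to the element size implied by the offset vector type,
   with predicated-off lanes zeroed.  A sketch, assuming the
   user-namespace alias is in effect, off is a uint32x4_t of byte
   offsets and p is an mve_pred16_t:

     extern int8_t table[256];
     int32x4_t g = vldrbq_gather_offset_z (table, off, p);
                              // bytes sign-extended to 32 bits, inactive lanes are 0
 */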
40519 
40520 #define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40521   __typeof(p1) __p1 = (p1); \
40522   __typeof(p2) __p2 = (p2); \
40523   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40524   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40525   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40526   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40527 
40528 #define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40529   __typeof(p1) __p1 = (p1); \
40530   __typeof(p2) __p2 = (p2); \
40531   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40532   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40533   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40534   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40535 
40536 #define __arm_vqdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40537   __typeof(p1) __p1 = (p1); \
40538   __typeof(p2) __p2 = (p2); \
40539   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40540   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40541   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40542   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40543 
40544 #define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40545   __typeof(p1) __p1 = (p1); \
40546   __typeof(p2) __p2 = (p2); \
40547   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40548   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40549   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40550   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40551   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40552   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40553   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40554 
40555 #define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40556   __typeof(p1) __p1 = (p1); \
40557   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40558   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t),  p2, p3), \
40559   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t),  p2, p3), \
40560   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t),  p2, p3), \
40561   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t),  p2, p3), \
40562   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t),  p2, p3), \
40563   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t),  p2, p3));})
40564 
40565 #define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40566   __typeof(p1) __p1 = (p1); \
40567   __typeof(p2) __p2 = (p2); \
40568   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40569   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40570   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40571   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40572   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40573   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40574   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40575 
40576 #define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40577   __typeof(p1) __p1 = (p1); \
40578   __typeof(p2) __p2 = (p2); \
40579   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40580   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40581   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40582   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40583   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40584   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40585   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40586 
40587 #define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40588   __typeof(p1) __p1 = (p1); \
40589   __typeof(p2) __p2 = (p2); \
40590   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40591   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40592   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40593   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40594   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40595   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40596   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40597 
40598 #define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40599   __typeof(p1) __p1 = (p1); \
40600   __typeof(p2) __p2 = (p2); \
40601   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40602   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40603   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40604   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40605   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40606   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40607   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40608 
40609 #define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40610   __typeof(p1) __p1 = (p1); \
40611   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40612   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t),  p2, p3), \
40613   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t),  p2, p3), \
40614   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t),  p2, p3), \
40615   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t),  p2, p3), \
40616   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t),  p2, p3), \
40617   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t),  p2, p3));})
40618 
40619 #define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40620   __typeof(p1) __p1 = (p1); \
40621   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40622   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t),  p2, p3), \
40623   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t),  p2, p3), \
40624   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t),  p2, p3), \
40625   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t),  p2, p3), \
40626   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t),  p2, p3), \
40627   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t),  p2, p3));})
40628 
40629 #define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40630   __typeof(p1) __p1 = (p1); \
40631   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40632   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t),  p2, p3), \
40633   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t),  p2, p3), \
40634   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t),  p2, p3), \
40635   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t),  p2, p3), \
40636   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t),  p2, p3), \
40637   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t),  p2, p3));})
40638 
40639 #define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40640   __typeof(p1) __p1 = (p1); \
40641   __typeof(p2) __p2 = (p2); \
40642   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40643   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40644   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40645   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40646   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40647   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40648   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40649   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40650   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40651   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40652   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40653   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40654   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
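
/* The "_m" (merging) forms above take an extra leading "inactive"
   argument: result lanes for which the predicate is false are copied
   from it rather than computed.  A sketch, assuming the user-namespace
   alias vqsubq_m is in effect and inactive, a, b are int8x16_t values
   with an mve_pred16_t p:

     int8x16_t r = vqsubq_m (inactive, a, b, p);
                   // active lanes: saturating a[i] - b[i]; others: inactive[i]
 */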
40655 
40656 #define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40657   __typeof(p1) __p1 = (p1); \
40658   __typeof(p2) __p2 = (p2); \
40659   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40660   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40661   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40662   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40663   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40664   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40665   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40666 
40667 #define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40668   __typeof(p1) __p1 = (p1); \
40669   __typeof(p2) __p2 = (p2); \
40670   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40671   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40672   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40673   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40674 
40675 #define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40676   __typeof(p1) __p1 = (p1); \
40677   __typeof(p2) __p2 = (p2); \
40678   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40679   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40680   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40681   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40682 
40683 #define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40684   __typeof(p1) __p1 = (p1); \
40685   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40686   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40687   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40688   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40689   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40690 
40691 #define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40692   __typeof(p1) __p1 = (p1); \
40693   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40694   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40695   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40696   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40697   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40698 
40699 #define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40700   __typeof(p1) __p1 = (p1); \
40701   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40702   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40703   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40704   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40705   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40706 
40707 #define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40708   __typeof(p1) __p1 = (p1); \
40709   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40710   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40711   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40712   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40713   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40714 
40715 #define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40716   __typeof(p1) __p1 = (p1); \
40717   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40718   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40719   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40720   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40721   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40722 
40723 #define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40724   __typeof(p1) __p1 = (p1); \
40725   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40726   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40727   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40728 
40729 #define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40730   __typeof(p1) __p1 = (p1); \
40731   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40732   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40733   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40734 
40735 #define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40736   __typeof(p1) __p1 = (p1); \
40737   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40738   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40739   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40740   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40741   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40742 
40743 #define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40744   __typeof(p1) __p1 = (p1); \
40745   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40746   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40747   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40748   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40749   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40750 
40751 #define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40752   __typeof(p1) __p1 = (p1); \
40753   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40754   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40755   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40756 
40757 #define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40758   __typeof(p1) __p1 = (p1); \
40759   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40760   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40761   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40762 
40763 #define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40764   __typeof(p1) __p1 = (p1); \
40765   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40766   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40767   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40768   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40769   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40770 
40771 #define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40772   __typeof(p1) __p1 = (p1); \
40773   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40774   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40775   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40776   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40777   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40778 
40779 #define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40780   __typeof(p1) __p1 = (p1); \
40781   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40782   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40783   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40784   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40785   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40786 
40787 #define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40788   __typeof(p1) __p1 = (p1); \
40789   __typeof(p2) __p2 = (p2); \
40790   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40791   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40792   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40793   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40794   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40795 
40796 #define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40797   __typeof(p1) __p1 = (p1); \
40798   __typeof(p2) __p2 = (p2); \
40799   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40800   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40801   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40802 
40803 #define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40804   __typeof(p1) __p1 = (p1); \
40805   __typeof(p2) __p2 = (p2); \
40806   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40807   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40808   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40809 
40810 #define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40811   __typeof(p1) __p1 = (p1); \
40812   __typeof(p2) __p2 = (p2); \
40813   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40814   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40815   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40816 
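/* Only s32 variants of the following intrinsics exist, so they map straight
   to the type-suffixed functions without _Generic dispatch.  */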
40817 #define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)
40818 
40819 #define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)
40820 
40821 #define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
40822 
40823 #define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40824   __typeof(p1) __p1 = (p1); \
40825   __typeof(p2) __p2 = (p2); \
40826   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40827   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40828   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40829   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40830 
40831 #define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40832   __typeof(p1) __p1 = (p1); \
40833   __typeof(p2) __p2 = (p2); \
40834   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40835   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40836   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40837   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40838 
40839 #define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40840   __typeof(p1) __p1 = (p1); \
40841   __typeof(p2) __p2 = (p2); \
40842   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40843   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40844   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40845   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40846 
40847 #define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40848   __typeof(p1) __p1 = (p1); \
40849   __typeof(p2) __p2 = (p2); \
40850   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40851   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40852   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40853   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40854 
40855 #define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40856   __typeof(p1) __p1 = (p1); \
40857   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40858   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
40859   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40860   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
40861 
40862 #define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40863   __typeof(p1) __p1 = (p1); \
40864   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40865   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
40866   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40867   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40868   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40869   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40870   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
40871   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
40872   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int), p2), \
40873   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
40874   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int), p2));})
40875 
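/* __arm_vorrq_m_n dispatches only on the vector operand; the immediate p1
   and the predicate p2 are passed through unchanged.  */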
40876 #define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40877   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
40878   int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
40879   int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
40880   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
40881   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
40882 
40883 #define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40884   __typeof(p1) __p1 = (p1); \
40885   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40886   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
40887   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
40888 
40889 #define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40890   __typeof(p1) __p1 = (p1); \
40891   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40892   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40893   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40894   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40895 
40896 #define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40897   __typeof(p1) __p1 = (p1); \
40898   __typeof(p2) __p2 = (p2); \
40899   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40900   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40901   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40902   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40903   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40904   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40905   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40906 
40907 #define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40908   __typeof(p1) __p1 = (p1); \
40909   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40910   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40911   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40912   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40913   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40914   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40915   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40916 
40917 #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40918   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
40919   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
40920   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
40921   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
40922   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
40923   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
40924   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
40925 
40926 #define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40927   __typeof(p1) __p1 = (p1); \
40928   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40929   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40930   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40931   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40932   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40933   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40934   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40935 
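/* __arm_vhaddq_m selects the _n_ (vector, scalar) variants when the last
   data argument is a plain integer, and the all-vector variants otherwise.  */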
40936 #define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40937   __typeof(p1) __p1 = (p1); \
40938   __typeof(p2) __p2 = (p2); \
40939   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40940   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40941   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40942   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40943   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40944   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40945   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40946   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40947   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40948   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40949   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40950   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40951   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40952 
40953 #define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40954   __typeof(p1) __p1 = (p1); \
40955   __typeof(p2) __p2 = (p2); \
40956   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40957   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40958   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40959   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40960 
40961 #define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40962   __typeof(p1) __p1 = (p1); \
40963   __typeof(p2) __p2 = (p2); \
40964   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40965   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40966   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40967   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40968 
40969 #define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40970   __typeof(p1) __p1 = (p1); \
40971   __typeof(p2) __p2 = (p2); \
40972   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40973   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40974   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40975   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40976   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40977   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40978   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
40979   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40980   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40981   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40982   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40983   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40984   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
40985 
40986 #define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40987   __typeof(p1) __p1 = (p1); \
40988   __typeof(p2) __p2 = (p2); \
40989   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40990   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40991   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40992   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40993   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40994   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40995   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40996 
40997 #define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40998   __typeof(p1) __p1 = (p1); \
40999   __typeof(p2) __p2 = (p2); \
41000   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41001   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41002   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41003   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41004   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41005   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41006   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41007 
41008 #define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41009   __typeof(p1) __p1 = (p1); \
41010   __typeof(p2) __p2 = (p2); \
41011   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41012   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41013   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41014   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41015   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
41016   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
41017   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
41018 
41019 #define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41020   __typeof(p1) __p1 = (p1); \
41021   __typeof(p2) __p2 = (p2); \
41022   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41023   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41024   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41025   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41026   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
41027   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
41028   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
41029 
41030 #define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41031   __typeof(p1) __p1 = (p1); \
41032   __typeof(p2) __p2 = (p2); \
41033   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41034   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41035   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41036   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41037   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41038   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41039   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41040 
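/* The widening integer multiplies below return a vector whose elements are
   twice the width of the inputs, e.g. int64x2_t for the s32 variants.  */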
41041 #define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41042   __typeof(p1) __p1 = (p1); \
41043   __typeof(p2) __p2 = (p2); \
41044   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41045   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41046   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41047   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41048   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41049   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41050   int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41051 
41052 #define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41053   __typeof(p1) __p1 = (p1); \
41054   __typeof(p2) __p2 = (p2); \
41055   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41056   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41057   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41058   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41059   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41060   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41061   int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41062 
41063 #define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41064   __typeof(p1) __p1 = (p1); \
41065   __typeof(p2) __p2 = (p2); \
41066   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41067   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41068   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41069 
41070 #define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41071   __typeof(p1) __p1 = (p1); \
41072   __typeof(p2) __p2 = (p2); \
41073   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41074   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41075   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41076   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41077   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
41078   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
41079   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
41080   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41081   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41082   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41083   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41084   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41085   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41086 
41087 #define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41088   __typeof(p1) __p1 = (p1); \
41089   __typeof(p2) __p2 = (p2); \
41090   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41091   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41092   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41093   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
41094 
41095 #define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41096   __typeof(p1) __p1 = (p1); \
41097   __typeof(p2) __p2 = (p2); \
41098   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41099   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41100   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41101   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41102   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41103   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41104   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41105 
41106 #define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41107   __typeof(p1) __p1 = (p1); \
41108   __typeof(p2) __p2 = (p2); \
41109   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41110   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41111   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41112   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41113   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
41114 
41115 #define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41116   __typeof(p1) __p1 = (p1); \
41117   __typeof(p2) __p2 = (p2); \
41118   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41119   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41120   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41121   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41122   int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41123 
41124 #define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41125   __typeof(p1) __p1 = (p1); \
41126   __typeof(p2) __p2 = (p2); \
41127   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41128   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41129   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41130   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41131 
41132 #define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41133   __typeof(p1) __p1 = (p1); \
41134   __typeof(p2) __p2 = (p2); \
41135   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41136   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41137   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41138   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41139 
41140 #define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41141   __typeof(p2) __p2 = (p2); \
41142   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41143   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41144   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41145   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41146 
41147 #define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
41148   __typeof(p2) __p2 = (p2); \
41149   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41150   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41151   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41152   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
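
/* Usage sketch (illustrative): vmlsdavaq performs the "multiply-subtract dual"
   reduction on top of the scalar accumulator p0, adding the products of
   even-numbered lanes and subtracting those of odd-numbered lanes.

     int16x8_t a = vdupq_n_s16 (2), b = vdupq_n_s16 (3);
     int32_t acc = vmlsdavaq (10, a, b);   // even and odd products cancel, acc stays 10
*/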
41153 
41154 #define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
41155   __typeof(p1) __p1 = (p1); \
41156   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41157   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41158   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41159   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41160 
41161 #define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41162   __typeof(p1) __p1 = (p1); \
41163   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41164   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41165   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41166   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41167 
41168 #define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41169   __typeof(p1) __p1 = (p1); \
41170   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41171   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41172   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41173   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41174 
41175 #define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41176   __typeof(p2) __p2 = (p2); \
41177   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41178   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41179   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41180   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41181 
41182 #define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41183   __typeof(p1) __p1 = (p1); \
41184   __typeof(p2) __p2 = (p2); \
41185   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41186   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41187   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41188   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41189 
41190 #define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41191   __typeof(p1) __p1 = (p1); \
41192   __typeof(p2) __p2 = (p2); \
41193   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41194   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41195   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41196 
41197 #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
41198   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
41199   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
41200   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
41201   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
41202   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
41203   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
41204   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
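
/* Usage sketch (illustrative): the gather load picks its element type from the
   base pointer and the offset vector; offsets are in bytes for the vldrbq
   family.

     uint8_t buf[16] = { 0 };
     uint8_t *base = buf;
     uint8x16_t offsets = vidupq_n_u8 (0, 1);                  // 0, 1, ..., 15
     uint8x16_t bytes = vldrbq_gather_offset (base, offsets);
*/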
41205 
41206 #define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41207   __typeof(p1) __p1 = (p1); \
41208   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41209   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
41210   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
41211   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
41212   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41213   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41214   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
41215 
41216 #define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41217   __typeof(p1) __p1 = (p1); \
41218   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41219   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
41220   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
41221   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
41222   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41223   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41224   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
41225 
41226 #define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41227   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41228   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
41229   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41230 
41231 #define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41232   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41233   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
41234   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41235 
41236 #define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41237   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41238   int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
41239   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41240 
41241 #define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41242   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41243   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
41244   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41245 
41246 #define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41247   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41248   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
41249   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41250 
41251 #define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41252   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41253   int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
41254   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
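
/* Usage sketch (illustrative): vidupq/vddupq accept either a scalar start
   value or a pointer to one; the pointer form selects the write-back variant,
   which stores the post-incremented (or post-decremented) start, convenient
   inside loops.

     uint32_t start = 0;
     uint8x16_t a = vidupq_u8 (start, 4);    // 0, 4, 8, ..., 60; start unchanged
     uint8x16_t b = vidupq_u8 (&start, 4);   // same values; start becomes 64
*/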
41255 
41256 #define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
41257   __typeof(p1) __p1 = (p1); \
41258   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41259   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41260   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41261   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41262   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41263   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41264   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
41265 
41266 #define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41267   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41268   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, (const int) p2), \
41269   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, (const int) p2));})
41270 
41271 #define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41272   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41273   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41274   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41275 
41276 #define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41277   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41278   int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41279   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
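
/* Usage sketch (illustrative): viwdupq adds a wrap operand, so the generated
   sequence restarts from zero once it reaches that value, which suits circular
   buffer indexing.

     uint8x16_t idx = viwdupq_u8 (0u, 8u, 1);   // 0, 1, ..., 7, 0, 1, ..., 7
*/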
41280 
41281 #define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
41282   __typeof(p1) __p1 = (p1); \
41283   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41284   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41285   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41286   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41287   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41288   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41289   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
41290 
41291 #define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41292   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41293   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41294   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41295 
41296 #define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41297   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41298   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41299   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41300 
41301 #define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41302   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41303   int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41304   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41305 
41306 #define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41307   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41308   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
41309   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
41310   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
41311   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
41312   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
41313   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
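
/* Usage sketch (illustrative, semantics summarised from the VSHLC description):
   vshlcq_m shifts each active element left by the immediate, chaining the bits
   shifted out of one element into the next and through the 32-bit carry
   variable passed by pointer.

     uint32_t carry = 1;
     int32x4_t v = vdupq_n_s32 (0);
     mve_pred16_t p = vctp32q (4);
     int32x4_t r = vshlcq_m (v, &carry, 1, p);   // low bit of 'carry' enters lane 0
*/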
41314 
41315 #define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41316   __typeof(p1) __p1 = (p1); \
41317   __typeof(p2) __p2 = (p2); \
41318   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41319   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41320   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41321   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41322   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41323   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41324   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
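
/* Usage sketch (illustrative): vabavq accumulates the absolute differences of
   the two vectors onto the scalar in p0, a SAD-style reduction.

     uint8x16_t a = vdupq_n_u8 (10), b = vdupq_n_u8 (3);
     uint32_t sad = vabavq (0u, a, b);   // 16 lanes of |10 - 3| gives 112
*/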
41325 
41326 #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41327   __typeof(p1) __p1 = (p1); \
41328   __typeof(p2) __p2 = (p2); \
41329   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41330   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41331   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41332   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41333   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41334   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41335   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41336 
41337 #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41338   __typeof(p1) __p1 = (p1); \
41339   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41340   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41341   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41342 
41343 #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41344   __typeof(p1) __p1 = (p1); \
41345   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41346   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41347   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41348 
41349 #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \
41350   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41351   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
41352   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
41353 
41354 #define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41355   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41356   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
41357   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
41358 
41359 #define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41360   __typeof(p1) __p1 = (p1); \
41361   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41362   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41363   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41364   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41365   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41366   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41367   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41368 
41369 #define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41370   __typeof(p1) __p1 = (p1); \
41371   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41372   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41373   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41374   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41375   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41376   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41377   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41378 
41379 #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \
41380   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41381   int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
41382   int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
41383   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
41384   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
41385   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
41386   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
41387 
41388 #define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41389   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41390   int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
41391   int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
41392   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
41393   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
41394   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
41395   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
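
/* Usage sketch (illustrative): vaddvq reduces a whole vector to a scalar sum,
   and the _p form only adds the lanes enabled by the predicate.

     int8x16_t v = vdupq_n_s8 (2);
     int32_t all  = vaddvq (v);                  // 16 * 2 gives 32
     int32_t some = vaddvq_p (v, vctp8q (4));    // first 4 lanes only, gives 8
*/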
41396 
41397 #define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41398   __typeof(p1) __p1 = (p1); \
41399   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41400   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41401   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41402   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
41403   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
41404   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
41405   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
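
/* Usage sketch (illustrative): the unsigned compares return an mve_pred16_t
   that can feed any predicated intrinsic; a scalar second operand selects the
   _n form.

     uint32x4_t a = vdupq_n_u32 (5);
     mve_pred16_t p = vcmpcsq (a, 3u);   // lanes where a >= 3, unsigned
     uint32_t sum = vaddvq_p (a, p);     // sum only those lanes
*/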
41406 
41407 #define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41408   __typeof(p1) __p1 = (p1); \
41409   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41410   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41411   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41412   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
41413   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
41414   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
41415   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
41416 
41417 #define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41418   __typeof(p1) __p1 = (p1); \
41419   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41420   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41421   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41422   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
41423   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
41424   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
41425   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
41426 
41427 #define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41428   __typeof(p1) __p1 = (p1); \
41429   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41430   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
41431   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
41432   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
41433   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41434   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41435   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41436 
41437 #define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41438   __typeof(p1) __p1 = (p1); \
41439   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41440   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41441   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41442   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
41443 
41444 #define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41445   __typeof(p1) __p1 = (p1); \
41446   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41447   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41448   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41449   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
41450 
41451 #define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41452   __typeof(p1) __p1 = (p1); \
41453   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41454   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41455   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41456   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
41457   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
41458   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
41459   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));})
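
/* Usage sketch (illustrative): vmaxvq folds the running scalar in p0 together
   with every lane of the vector, so seeding it with the type's minimum yields
   a plain horizontal maximum.

     uint8x16_t v = vdupq_n_u8 (7);
     uint8_t m = vmaxvq ((uint8_t) 0, v);   // max (0, 7, ..., 7) gives 7
*/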
41460 
41461 #define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41462   __typeof(p1) __p1 = (p1); \
41463   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41464   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41465   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41466   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
41467   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41468   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41469   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41470 
41471 #define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41472   __typeof(p1) __p1 = (p1); \
41473   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41474   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41475   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41476   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
41477 
41478 #define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41479   __typeof(p1) __p1 = (p1); \
41480   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41481   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41482   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41483   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
41484 
41485 #define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41486   __typeof(p2) __p2 = (p2); \
41487   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41488   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41489   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41490   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41491   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41492   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41493   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41494 
41495 #define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41496   __typeof(p2) __p2 = (p2); \
41497   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41498   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41499   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41500   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41501   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41502   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41503   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41504 
41505 #define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41506   __typeof(p1) __p1 = (p1); \
41507   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41508   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41509   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41510   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
41511   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
41512   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
41513   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));})
41514 
41515 #define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41516   __typeof(p1) __p1 = (p1); \
41517   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41518   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41519   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41520   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
41521   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41522   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41523   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41524 
41525 #define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41526   __typeof(p1) __p1 = (p1); \
41527   __typeof(p2) __p2 = (p2); \
41528   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41529   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41530   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41531   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41532   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41533   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41534   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41535 
41536 #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41537   __typeof(p1) __p1 = (p1); \
41538   __typeof(p2) __p2 = (p2); \
41539   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41540   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41541   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41542   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41543   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41544   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41545   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41546 
41547 #define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41548   __typeof(p1) __p1 = (p1); \
41549   __typeof(p2) __p2 = (p2); \
41550   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41551   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41552   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41553   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41554   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41555   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41556   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41557 
41558 #define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41559   __typeof(p1) __p1 = (p1); \
41560   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41561   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41562   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41563   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41564   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41565   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41566   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
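
/* Usage sketch (illustrative): vmladavq is the dot-product style reduction
   (sum of the per-lane products); vmladavaq adds that sum to an existing
   scalar accumulator.

     int16x8_t a = vdupq_n_s16 (3), b = vdupq_n_s16 (4);
     int32_t dot = vmladavq (a, b);         // 8 * (3 * 4) gives 96
     int32_t acc = vmladavaq (100, a, b);   // 100 + 96 gives 196
*/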
41567 
41568 #define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41569   __typeof(p1) __p1 = (p1); \
41570   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41571   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41572   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41573   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41574   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41575   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41576   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41577 
41578 #define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41579   __typeof(p1) __p1 = (p1); \
41580   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41581   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41582   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41583   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41584   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41585   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41586   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41587 
41588 #define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41589   __typeof(p1) __p1 = (p1); \
41590   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41591   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41592   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41593   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41594 
41595 #define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41596   __typeof(p1) __p1 = (p1); \
41597   __typeof(p2) __p2 = (p2); \
41598   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41599   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41600   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41601   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41602   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
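
/* Usage sketch (illustrative): the "l" (long) members of the mladav family
   keep the accumulator in 64 bits, avoiding overflow for 16-bit and 32-bit
   element types.

     int16x8_t a = vdupq_n_s16 (1000), b = vdupq_n_s16 (1000);
     int64_t acc = vmlaldavaq (0ll, a, b);   // 8 * 1000 * 1000 gives 8000000
*/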
41603 
41604 #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41605   __typeof(p1) __p1 = (p1); \
41606   __typeof(p2) __p2 = (p2); \
41607   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41608   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41609   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41610 
41611 #define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41612   __typeof(p1) __p1 = (p1); \
41613   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41614   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41615   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41616   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41617   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41618 
41619 #define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41620   __typeof(p1) __p1 = (p1); \
41621   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41622   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41623   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41624   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41625   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41626 
41627 #define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41628   __typeof(p1) __p1 = (p1); \
41629   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41630   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41631   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41632 
41633 #define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41634   __typeof(p1) __p1 = (p1); \
41635   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41636   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41637   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41638   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41639 
41640 #define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41641   __typeof(p1) __p1 = (p1); \
41642   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41643   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41644   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41645   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41646 
41647 #define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41648   __typeof(p1) __p1 = (p1); \
41649   __typeof(p2) __p2 = (p2); \
41650   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41651   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41652   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41653 
41654 #define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41655   __typeof(p1) __p1 = (p1); \
41656   __typeof(p2) __p2 = (p2); \
41657   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41658   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41659   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41660 
41661 #define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41662   __typeof(p1) __p1 = (p1); \
41663   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41664   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41665   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41666 
41667 #define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41668   __typeof(p1) __p1 = (p1); \
41669   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41670   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41671   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41672 
41673 #define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41674   __typeof(p1) __p1 = (p1); \
41675   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41676   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41677   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41678 
41679 #define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41680   __typeof(p1) __p1 = (p1); \
41681   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41682   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41683   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41684 
41685 #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
41686   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
41687   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
41688   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
41689   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
41690   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
41691 
41692 #define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
41693   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
41694   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
41695   int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
41696   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
41697   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
41698 
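/* _x forms of the high-half and long multiplies: the overload is selected
   by the (matching) element types of both vector operands; the polynomial
   variants accept only the unsigned 8-bit and 16-bit vector types.  */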
41699 #define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41700   __typeof(p2) __p2 = (p2); \
41701   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41702   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41703   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41704   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41705   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41706   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41707   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41708 
41709 #define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41710   __typeof(p2) __p2 = (p2); \
41711   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41712   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41713   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41714   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41715   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41716   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41717   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41718 
41719 #define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41720   __typeof(p2) __p2 = (p2); \
41721   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41722   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41723   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41724 
41725 #define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41726   __typeof(p2) __p2 = (p2); \
41727   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41728   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41729   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41730   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41731   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41732   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41733   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41734 
41735 #define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41736   __typeof(p2) __p2 = (p2); \
41737   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41738   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41739   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41740 
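/* The rounding high-half (vrml[as]ldavh*) reductions operate on 32-bit
   elements only, and the exchange and subtract forms are signed-only, so
   most of them map directly to the _s32 entry point with no _Generic
   dispatch.  */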
41741 #define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2)
41742 
41743 #define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41744   __typeof(p1) __p1 = (p1); \
41745   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41746   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41747   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41748 
41749 #define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41750   __typeof(p1) __p1 = (p1); \
41751   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41752   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41753   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41754 
41755 #define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1)
41756 
41757 #define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2)
41758 
41759 #define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2)
41760 
41761 #define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2)
41762 
41763 #define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1)
41764 
41765 #define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2)
41766 
41767 #define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1)
41768 
41769 #define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2)
41770 
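/* Byte stores: dispatch on both the destination pointer type and the
   element size of the value vector, so for example an int16x8_t value
   stored through an int8_t * pointer selects the narrowing
   __arm_vstrbq_s16 form.  */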
41771 #define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
41772   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
41773   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
41774   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
41775   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
41776   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
41777   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
41778   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
41779 
41780 #define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41781   __typeof(p1) __p1 = (p1); \
41782   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41783   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41784   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41785   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41786   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41787   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41788   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41789 
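/* 64-bit scatter stores to a vector of base addresses: only the value
   vector (signed or unsigned 64-bit) takes part in the dispatch; the base
   vector and the immediate offset are passed through unchanged.  */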
41790 #define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
41791   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
41792   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
41793   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
41794 
41795 #define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
41796   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
41797   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
41798   int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
41799 
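/* Accumulating forms of the rounding high-half reductions: the scalar
   accumulator is matched as __ARM_mve_type_int_n and coerced to the
   64-bit type that corresponds to the vector operands.  */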
41800 #define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41801   __typeof(p1) __p1 = (p1); \
41802   __typeof(p2) __p2 = (p2); \
41803   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41804   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41805   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41806 
41807 #define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41808   __typeof(p1) __p1 = (p1); \
41809   __typeof(p2) __p2 = (p2); \
41810   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41811   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41812   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41813 
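/* Byte scatter stores: the per-lane offsets are always an unsigned vector
   of the same element width as the data, while the signedness of the data
   vector and of the destination pointer select the _s or _u entry point.  */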
41814 #define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41815   __typeof(p1) __p1 = (p1); \
41816   __typeof(p2) __p2 = (p2); \
41817   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41818   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41819   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41820   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41821   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41822   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41823   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41824 
41825 #define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41826   __typeof(p1) __p1 = (p1); \
41827   __typeof(p2) __p2 = (p2); \
41828   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41829   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41830   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41831   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41832   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41833   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41834   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41835 
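/* 64-bit scatter stores through a scalar base pointer: dispatch on the
   pointer and value types; the offset vector (p1) and, in the _p forms,
   the predicate are forwarded as-is.  */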
41836 #define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41837   __typeof(p2) __p2 = (p2); \
41838   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
41839   int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
41840   int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
41841 
41842 #define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41843   __typeof(p2) __p2 = (p2); \
41844   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
41845   int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
41846   int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
41847 
41848 #define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41849   __typeof(p2) __p2 = (p2); \
41850   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
41851   int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
41852   int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
41853 
41854 #define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41855   __typeof(p2) __p2 = (p2); \
41856   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
41857   int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
41858   int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
41859 
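/* Illustrative example (not part of the original header): given

     uint32x4_t a, b;
     uint64_t acc;
     mve_pred16_t p;

   the polymorphic call

     acc = __arm_vrmlaldavhaq_p (acc, a, b, p);

   resolves through the _Generic selector above to
   __arm_vrmlaldavhaq_p_u32, because acc matches __ARM_mve_type_int_n and
   a and b match __ARM_mve_type_uint32x4_t.  */
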
41860 #endif /* __cplusplus  */
41861 #endif /* __ARM_FEATURE_MVE  */
41862 #endif /* _GCC_ARM_MVE_H.  */
41863