xref: /netbsd-src/external/gpl3/binutils/dist/include/opcode/aarch64.h (revision cb63e24e8d6aae7ddac1859a9015f48b1d8bd90e)
1 /* AArch64 assembler/disassembler support.
2 
3    Copyright (C) 2009-2024 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GNU Binutils.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the license, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24 
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29 
30 #include "dis-asm.h"
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35 
36 /* The offset for pc-relative addressing is currently defined to be 0.  */
37 #define AARCH64_PCREL_OFFSET		0
38 
39 typedef uint32_t aarch64_insn;
40 
41 /* An enum containing all known CPU features.  The values act as bit positions
42    into aarch64_feature_set.  */
43 enum aarch64_feature_bit {
44   /* All processors.  */
45   AARCH64_FEATURE_V8,
46   /* ARMv8.6 processors.  */
47   AARCH64_FEATURE_V8_6A,
48   /* Bfloat16 insns.  */
49   AARCH64_FEATURE_BFLOAT16,
50   /* Armv8-A processors.  */
51   AARCH64_FEATURE_V8A,
52   /* SVE2 instructions.  */
53   AARCH64_FEATURE_SVE2,
54   /* ARMv8.2 processors.  */
55   AARCH64_FEATURE_V8_2A,
56   /* ARMv8.3 processors.  */
57   AARCH64_FEATURE_V8_3A,
58   AARCH64_FEATURE_SVE2_AES,	/* SVE2 AES instructions.  */
59   AARCH64_FEATURE_SVE2_BITPERM,	/* SVE2 bit-permute instructions.  */
60   AARCH64_FEATURE_SVE2_SM4,	/* SVE2 SM4 instructions.  */
61   AARCH64_FEATURE_SVE2_SHA3,	/* SVE2 SHA3 instructions.  */
62   /* ARMv8.4 processors.  */
63   AARCH64_FEATURE_V8_4A,
64   /* Armv8-R processors.  */
65   AARCH64_FEATURE_V8R,
66   /* Armv8.7 processors.  */
67   AARCH64_FEATURE_V8_7A,
68   /* Scalable Matrix Extension.  */
69   AARCH64_FEATURE_SME,
70   /* Atomic 64-byte load/store.  */
71   AARCH64_FEATURE_LS64,
72   /* v8.3 Pointer Authentication.  */
73   AARCH64_FEATURE_PAC,
74   /* FP instructions.  */
75   AARCH64_FEATURE_FP,
76   /* SIMD instructions.  */
77   AARCH64_FEATURE_SIMD,
78   /* CRC instructions.  */
79   AARCH64_FEATURE_CRC,
80   /* LSE instructions.  */
81   AARCH64_FEATURE_LSE,
82   /* PAN instructions.  */
83   AARCH64_FEATURE_PAN,
84   /* LOR instructions.  */
85   AARCH64_FEATURE_LOR,
86   /* v8.1 SIMD instructions.  */
87   AARCH64_FEATURE_RDMA,
88   /* v8.1 features.  */
89   AARCH64_FEATURE_V8_1A,
90   /* v8.2 FP16 instructions.  */
91   AARCH64_FEATURE_F16,
92   /* RAS Extensions.  */
93   AARCH64_FEATURE_RAS,
94   /* Statistical Profiling.  */
95   AARCH64_FEATURE_PROFILE,
96   /* SVE instructions.  */
97   AARCH64_FEATURE_SVE,
98   /* RCPC instructions.  */
99   AARCH64_FEATURE_RCPC,
100   /* RCPC2 instructions.  */
101   AARCH64_FEATURE_RCPC2,
102   /* Complex # instructions.  */
103   AARCH64_FEATURE_COMPNUM,
104   /* JavaScript conversion instructions.  */
105   AARCH64_FEATURE_JSCVT,
106   /* Dot Product instructions.  */
107   AARCH64_FEATURE_DOTPROD,
108   /* SM3 & SM4 instructions.  */
109   AARCH64_FEATURE_SM4,
110   /* SHA2 instructions.  */
111   AARCH64_FEATURE_SHA2,
112   /* SHA3 instructions.  */
113   AARCH64_FEATURE_SHA3,
114   /* AES instructions.  */
115   AARCH64_FEATURE_AES,
116   /* v8.2 FP16FML ins.  */
117   AARCH64_FEATURE_F16_FML,
118   /* ARMv8.5 processors.  */
119   AARCH64_FEATURE_V8_5A,
120   /* v8.5 Flag Manipulation version 2.  */
121   AARCH64_FEATURE_FLAGMANIP,
122   /* FRINT[32,64][Z,X] insns.  */
123   AARCH64_FEATURE_FRINTTS,
124   /* SB instruction.  */
125   AARCH64_FEATURE_SB,
126   /* Execution and Data Prediction Restriction instructions.  */
127   AARCH64_FEATURE_PREDRES,
128   /* DC CVADP.  */
129   AARCH64_FEATURE_CVADP,
130   /* Random Number instructions.  */
131   AARCH64_FEATURE_RNG,
132   /* SCXTNUM_ELx.  */
133   AARCH64_FEATURE_SCXTNUM,
134   /* ID_PFR2 instructions.  */
135   AARCH64_FEATURE_ID_PFR2,
136   /* SSBS mechanism enabled.  */
137   AARCH64_FEATURE_SSBS,
138   /* Memory Tagging Extension.  */
139   AARCH64_FEATURE_MEMTAG,
140   /* Transactional Memory Extension.  */
141   AARCH64_FEATURE_TME,
142   /* XS memory attribute.  */
143   AARCH64_FEATURE_XS,
144   /* WFx instructions with timeout.  */
145   AARCH64_FEATURE_WFXT,
146   /* Standardization of memory operations.  */
147   AARCH64_FEATURE_MOPS,
148   /* Hinted conditional branches.  */
149   AARCH64_FEATURE_HBC,
150   /* Matrix Multiply instructions.  */
151   AARCH64_FEATURE_I8MM,
152   AARCH64_FEATURE_F32MM,	/* Single-precision matrix multiply.  */
153   AARCH64_FEATURE_F64MM,	/* Double-precision matrix multiply.  */
154   /* v8.4 Flag Manipulation.  */
155   AARCH64_FEATURE_FLAGM,
156   /* Armv9.0-A processors.  */
157   AARCH64_FEATURE_V9A,
158   /* SME F64F64.  */
159   AARCH64_FEATURE_SME_F64F64,
160   /* SME I16I64.  */
161   AARCH64_FEATURE_SME_I16I64,
162   /* Armv8.8 processors.  */
163   AARCH64_FEATURE_V8_8A,
164   /* Common Short Sequence Compression instructions.  */
165   AARCH64_FEATURE_CSSC,
166   /* Armv8.9-A processors.  */
167   AARCH64_FEATURE_V8_9A,
168   /* Check Feature Status Extension.  */
169   AARCH64_FEATURE_CHK,
170   /* Guarded Control Stack.  */
171   AARCH64_FEATURE_GCS,
172   /* SPE Call Return branch records.  */
173   AARCH64_FEATURE_SPE_CRR,
174   /* SPE Filter by data source.  */
175   AARCH64_FEATURE_SPE_FDS,
176   /* Additional SPE events.  */
177   AARCH64_FEATURE_SPEv1p4,
178   /* SME2.  */
179   AARCH64_FEATURE_SME2,
180   /* Translation Hardening Extension.  */
181   AARCH64_FEATURE_THE,
182   /* LSE128.  */
183   AARCH64_FEATURE_LSE128,
184   /* ARMv8.9-A RAS Extensions.  */
185   AARCH64_FEATURE_RASv2,
186   /* System Control Register2.  */
187   AARCH64_FEATURE_SCTLR2,
188   /* Fine Grained Traps.  */
189   AARCH64_FEATURE_FGT2,
190   /* Physical Fault Address.  */
191   AARCH64_FEATURE_PFAR,
192   /* Address Translate Stage 1.  */
193   AARCH64_FEATURE_ATS1A,
194   /* Memory Attribute Index Enhancement.  */
195   AARCH64_FEATURE_AIE,
196   /* Stage 1 Permission Indirection Extension.  */
197   AARCH64_FEATURE_S1PIE,
198   /* Stage 2 Permission Indirection Extension.  */
199   AARCH64_FEATURE_S2PIE,
200   /* Stage 1 Permission Overlay Extension.  */
201   AARCH64_FEATURE_S1POE,
202   /* Stage 2 Permission Overlay Extension.  */
203   AARCH64_FEATURE_S2POE,
204   /* Extension to Translation Control Registers.  */
205   AARCH64_FEATURE_TCR2,
206   /* Speculation Prediction Restriction instructions.  */
207   AARCH64_FEATURE_PREDRES2,
208   /* Instrumentation Extension.  */
209   AARCH64_FEATURE_ITE,
210   /* 128-bit page table descriptor, system registers
211      and instructions.  */
212   AARCH64_FEATURE_D128,
213   /* Armv8.9-A/Armv9.4-A architecture Debug extension.  */
214   AARCH64_FEATURE_DEBUGv8p9,
215   /* Performance Monitors Extension.  */
216   AARCH64_FEATURE_PMUv3p9,
217   /* Performance Monitors Snapshots Extension.  */
218   AARCH64_FEATURE_PMUv3_SS,
219   /* Performance Monitors Instruction Counter Extension.  */
220   AARCH64_FEATURE_PMUv3_ICNTR,
221   /* Performance Monitors Synchronous-Exception-Based Event Extension.  */
222   AARCH64_FEATURE_SEBEP,
223   /* SVE2.1 and SME2.1 non-widening BFloat16 instructions.  */
224   AARCH64_FEATURE_B16B16,
225   /* SME2.1 instructions.  */
226   AARCH64_FEATURE_SME2p1,
227   /* SVE2.1 instructions.  */
228   AARCH64_FEATURE_SVE2p1,
229   /* RCPC3 instructions.  */
230   AARCH64_FEATURE_RCPC3,
231   AARCH64_NUM_FEATURES	/* Total number of feature bits; must be last.  */
232 };
233 
234 /* These macros take an initial argument X that gives the index into
235    an aarch64_feature_set.  The macros then return the bitmask for
236    that array index.  */
237 
238 /* A mask in which feature bit BIT is set and all other bits are clear,
   or 0 when BIT lives in a flags[] element other than X.  */
239 #define AARCH64_UINT64_BIT(X, BIT) \
240   ((X) == (BIT) / 64 ? 1ULL << (BIT) % 64 : 0)
241 
242 /* A mask that includes only AARCH64_FEATURE_<NAME>.  */
243 #define AARCH64_FEATBIT(X, NAME) \
244   AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
245 
246 /* A mask of the features that are enabled by each architecture version,
247    excluding those that are inherited from other architecture versions.
   Each macro evaluates to the uint64_t mask for flags[X].  */
248 #define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
249 					 | AARCH64_FEATBIT (X, FP)	\
250 					 | AARCH64_FEATBIT (X, RAS)	\
251 					 | AARCH64_FEATBIT (X, SIMD)	\
252 					 | AARCH64_FEATBIT (X, CHK))
253 #define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
254 					 | AARCH64_FEATBIT (X, CRC)	\
255 					 | AARCH64_FEATBIT (X, LSE)	\
256 					 | AARCH64_FEATBIT (X, PAN)	\
257 					 | AARCH64_FEATBIT (X, LOR)	\
258 					 | AARCH64_FEATBIT (X, RDMA))
259 #define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
260 #define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
261 					 | AARCH64_FEATBIT (X, PAC)	\
262 					 | AARCH64_FEATBIT (X, RCPC)	\
263 					 | AARCH64_FEATBIT (X, COMPNUM) \
264 					 | AARCH64_FEATBIT (X, JSCVT))
265 #define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
266 					 | AARCH64_FEATBIT (X, RCPC2)	\
267 					 | AARCH64_FEATBIT (X, DOTPROD)	\
268 					 | AARCH64_FEATBIT (X, FLAGM)	\
269 					 | AARCH64_FEATBIT (X, F16_FML))
270 #define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
271 					 | AARCH64_FEATBIT (X, FLAGMANIP) \
272 					 | AARCH64_FEATBIT (X, FRINTTS)	\
273 					 | AARCH64_FEATBIT (X, SB)	\
274 					 | AARCH64_FEATBIT (X, PREDRES)	\
275 					 | AARCH64_FEATBIT (X, CVADP)	\
276 					 | AARCH64_FEATBIT (X, SCXTNUM)	\
277 					 | AARCH64_FEATBIT (X, ID_PFR2)	\
278 					 | AARCH64_FEATBIT (X, SSBS))
279 #define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
280 					 | AARCH64_FEATBIT (X, BFLOAT16) \
281 					 | AARCH64_FEATBIT (X, I8MM))
282 #define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
283 					 | AARCH64_FEATBIT (X, XS)      \
284 					 | AARCH64_FEATBIT (X, WFXT)    \
285 					 | AARCH64_FEATBIT (X, LS64))
286 #define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
287 					 | AARCH64_FEATBIT (X, MOPS)	\
288 					 | AARCH64_FEATBIT (X, HBC))
289 #define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
290 					 | AARCH64_FEATBIT (X, SPEv1p4) \
291 					 | AARCH64_FEATBIT (X, SPE_CRR)	\
292 					 | AARCH64_FEATBIT (X, SPE_FDS) \
293 					 | AARCH64_FEATBIT (X, RASv2)	\
294 					 | AARCH64_FEATBIT (X, SCTLR2)	\
295 					 | AARCH64_FEATBIT (X, FGT2)	\
296 					 | AARCH64_FEATBIT (X, PFAR)	\
297 					 | AARCH64_FEATBIT (X, ATS1A)	\
298 					 | AARCH64_FEATBIT (X, AIE)	\
299 					 | AARCH64_FEATBIT (X, S1PIE)	\
300 					 | AARCH64_FEATBIT (X, S2PIE)	\
301 					 | AARCH64_FEATBIT (X, S1POE)	\
302 					 | AARCH64_FEATBIT (X, S2POE)	\
303 					 | AARCH64_FEATBIT (X, TCR2)	\
304 					 | AARCH64_FEATBIT (X, DEBUGv8p9) \
305 					 | AARCH64_FEATBIT (X, PMUv3p9)	\
306 					 | AARCH64_FEATBIT (X, PMUv3_SS) \
307 					 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
308 					 | AARCH64_FEATBIT (X, SEBEP) \
309 					 | AARCH64_FEATBIT (X, PREDRES2) \
310 					)
311 
312 /* The Armv9.x delta sets: v9.0 adds F16/SVE/SVE2 on top of the v9 bit;
   v9.1 through v9.4 reuse the corresponding v8.x delta sets.  */
312 #define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
313 					 | AARCH64_FEATBIT (X, F16)	\
314 					 | AARCH64_FEATBIT (X, SVE)	\
315 					 | AARCH64_FEATBIT (X, SVE2))
316 #define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
317 #define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
318 #define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
319 #define AARCH64_ARCH_V9_4A_FEATURES(X)	AARCH64_ARCH_V8_9A_FEATURES (X)
320 
321 /* Architectures are the sum of the base and extensions.  Each version
   inherits everything from the previous one; Armv8-R is built from
   Armv8.4 with the V8A and LOR bits removed.  */
322 #define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8) \
323 				 | AARCH64_ARCH_V8A_FEATURES (X))
324 #define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X) \
325 				 | AARCH64_ARCH_V8_1A_FEATURES (X))
326 #define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)	\
327 				 | AARCH64_ARCH_V8_2A_FEATURES (X))
328 #define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)	\
329 				 | AARCH64_ARCH_V8_3A_FEATURES (X))
330 #define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)	\
331 				 | AARCH64_ARCH_V8_4A_FEATURES (X))
332 #define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)	\
333 				 | AARCH64_ARCH_V8_5A_FEATURES (X))
334 #define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)	\
335 				 | AARCH64_ARCH_V8_6A_FEATURES (X))
336 #define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)	\
337 				 | AARCH64_ARCH_V8_7A_FEATURES (X))
338 #define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)	\
339 				 | AARCH64_ARCH_V8_8A_FEATURES (X))
340 #define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)	\
341 				 | AARCH64_ARCH_V8_9A_FEATURES (X))
342 #define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
343 				  | AARCH64_FEATBIT (X, V8R))	\
344 				 & ~AARCH64_FEATBIT (X, V8A)	\
345 				 & ~AARCH64_FEATBIT (X, LOR))
346 
347 /* The Armv9.x architectures; v9.0 is based on v8.5.  */
347 #define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X) \
348 				 | AARCH64_ARCH_V9A_FEATURES (X))
349 #define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X) \
350 				 | AARCH64_ARCH_V9_1A_FEATURES (X))
351 #define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X) \
352 				 | AARCH64_ARCH_V9_2A_FEATURES (X))
353 #define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X) \
354 				 | AARCH64_ARCH_V9_3A_FEATURES (X))
355 #define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X) \
356 				 | AARCH64_ARCH_V9_4A_FEATURES (X))
357 
358 /* An empty feature mask, for "no architecture".  */
358 #define AARCH64_ARCH_NONE(X)	0
359 
360 /* CPU-specific features.  Feature I (an aarch64_feature_bit value) is
   present when bit (I % 64) of flags[I / 64] is set; see
   AARCH64_UINT64_BIT above.  */
361 typedef struct {
362   uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
363 } aarch64_feature_set;
364 
/* Nonzero if CPU implements feature FEAT, where FEAT is the suffix of
   an AARCH64_FEATURE_<FEAT> enumerator.  */
365 #define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)	\
366   ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0		\
367    && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)
368 
/* Nonzero if CPU implements every feature in the set FEAT.  */
369 #define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)	\
370   ((~(CPU).flags[0] & (FEAT).flags[0]) == 0	\
371    && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)
372 
/* Nonzero if CPU implements at least one feature in the set FEAT.  */
373 #define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
374   (((CPU).flags[0] & (FEAT).flags[0]) != 0	\
375    || ((CPU).flags[1] & (FEAT).flags[1]) != 0)
376 
/* Set DEST to exactly the features given by mask macro FEAT,
   e.g. AARCH64_SET_FEATURE (set, AARCH64_ARCH_V8A).  */
377 #define AARCH64_SET_FEATURE(DEST, FEAT) \
378   ((DEST).flags[0] = FEAT (0),		\
379    (DEST).flags[1] = FEAT (1))
380 
/* Copy SRC to DEST with the single feature FEAT removed.  */
381 #define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)		\
382   ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \
383    (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))
384 
/* TARG = F1 | F2: the union of two feature sets.  */
385 #define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)		\
386   do							\
387     {							\
388       (TARG).flags[0] = (F1).flags[0] | (F2).flags[0];	\
389       (TARG).flags[1] = (F1).flags[1] | (F2).flags[1];	\
390     }							\
391   while (0)
392 
/* TARG = F1 & ~F2: the features of F1 that are not in F2.  */
393 #define AARCH64_CLEAR_FEATURES(TARG,F1,F2)		\
394   do							\
395     {							\
396       (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0];	\
397       (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1];	\
398     }							\
399   while (0)
400 
401 /* aarch64_feature_set initializers for no features and all features,
402    respectively.  */
403 #define AARCH64_NO_FEATURES { { 0, 0 } }
404 #define AARCH64_ALL_FEATURES { { -1, -1 } }
405 
406 /* An aarch64_feature_set initializer for a single feature,
407    AARCH64_FEATURE_<FEAT>.  */
408 #define AARCH64_FEATURE(FEAT) \
409   { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }
410 
411 /* An aarch64_feature_set initializer for a specific architecture version,
412    including all the features that are enabled by default for that architecture
413    version.  */
414 #define AARCH64_ARCH_FEATURES(ARCH) \
415   { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }
416 
417 /* Used by AARCH64_CPU_FEATURES.  AARCH64_OR_FEATURES_<N> ORs the bits of
   N named features into the mask for architecture ARCH, for flags[X].  */
418 #define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
419   (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
420 #define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
421   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
422 #define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
423   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
424 #define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
425   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
426 #define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
427   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
428 #define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
429   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
430 #define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
431   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
432 #define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
433   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
434 #define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
435   (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))
436 
437 /* An aarch64_feature_set initializer for a CPU that implements architecture
438    version ARCH, and additionally provides the N features listed in "...".  */
439 #define AARCH64_CPU_FEATURES(ARCH, N, ...)			\
440   { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),		\
441       AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }
442 
443 /* An aarch64_feature_set initializer for the N features listed in "...".  */
444 #define AARCH64_FEATURES(N, ...) \
445   AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
446 
/* Broad classes that group the aarch64_opnd operand kinds below.  */
447 enum aarch64_operand_class
448 {
449   AARCH64_OPND_CLASS_NIL,
450   AARCH64_OPND_CLASS_INT_REG,	/* Integer register.  */
451   AARCH64_OPND_CLASS_MODIFIED_REG,	/* Extended/shifted register.  */
452   AARCH64_OPND_CLASS_FP_REG,	/* Floating-point register.  */
453   AARCH64_OPND_CLASS_SIMD_REG,	/* AdvSIMD vector register.  */
454   AARCH64_OPND_CLASS_SIMD_ELEMENT,	/* AdvSIMD vector element.  */
455   AARCH64_OPND_CLASS_SISD_REG,	/* AdvSIMD scalar register.  */
456   AARCH64_OPND_CLASS_SIMD_REGLIST,	/* AdvSIMD vector register list.  */
457   AARCH64_OPND_CLASS_SVE_REG,	/* SVE vector register.  */
458   AARCH64_OPND_CLASS_SVE_REGLIST,	/* SVE vector register list.  */
459   AARCH64_OPND_CLASS_PRED_REG,	/* SVE predicate register.  */
460   AARCH64_OPND_CLASS_ZA_ACCESS,	/* SME ZA array access.  */
461   AARCH64_OPND_CLASS_ADDRESS,	/* Memory address.  */
462   AARCH64_OPND_CLASS_IMMEDIATE,	/* Immediate value.  */
463   AARCH64_OPND_CLASS_SYSTEM,	/* System register/operation.  */
464   AARCH64_OPND_CLASS_COND,	/* Condition code.  */
465 };
466 
467 /* Operand code that helps both parsing and coding.
468    Keep AARCH64_OPERANDS synced.  */
469 
470 enum aarch64_opnd
471 {
472   AARCH64_OPND_NIL,	/* no operand---MUST BE FIRST!*/
473 
474   AARCH64_OPND_Rd,	/* Integer register as destination.  */
475   AARCH64_OPND_Rn,	/* Integer register as source.  */
476   AARCH64_OPND_Rm,	/* Integer register as source.  */
477   AARCH64_OPND_Rt,	/* Integer register used in ld/st instructions.  */
478   AARCH64_OPND_Rt2,	/* Integer register used in ld/st pair instructions.  */
479   AARCH64_OPND_X16,	/* Integer register x16 in chkfeat instruction.  */
480   AARCH64_OPND_Rt_LS64,	/* Integer register used in LS64 instructions.  */
481   AARCH64_OPND_Rt_SP,	/* Integer Rt or SP used in STG instructions.  */
482   AARCH64_OPND_Rs,	/* Integer register used in ld/st exclusive.  */
483   AARCH64_OPND_Ra,	/* Integer register used in ddp_3src instructions.  */
484   AARCH64_OPND_Rt_SYS,	/* Integer register used in system instructions.  */
485 
486   AARCH64_OPND_Rd_SP,	/* Integer Rd or SP.  */
487   AARCH64_OPND_Rn_SP,	/* Integer Rn or SP.  */
488   AARCH64_OPND_Rm_SP,	/* Integer Rm or SP.  */
489   AARCH64_OPND_PAIRREG,	/* Paired register operand.  */
490   AARCH64_OPND_PAIRREG_OR_XZR,	/* Paired register operand, optionally xzr.  */
491   AARCH64_OPND_Rm_EXT,	/* Integer Rm extended.  */
492   AARCH64_OPND_Rm_SFT,	/* Integer Rm shifted.  */
493 
494   AARCH64_OPND_Fd,	/* Floating-point Fd.  */
495   AARCH64_OPND_Fn,	/* Floating-point Fn.  */
496   AARCH64_OPND_Fm,	/* Floating-point Fm.  */
497   AARCH64_OPND_Fa,	/* Floating-point Fa.  */
498   AARCH64_OPND_Ft,	/* Floating-point Ft.  */
499   AARCH64_OPND_Ft2,	/* Floating-point Ft2.  */
500 
501   AARCH64_OPND_Sd,	/* AdvSIMD Scalar Sd.  */
502   AARCH64_OPND_Sn,	/* AdvSIMD Scalar Sn.  */
503   AARCH64_OPND_Sm,	/* AdvSIMD Scalar Sm.  */
504 
505   AARCH64_OPND_Va,	/* AdvSIMD Vector Va.  */
506   AARCH64_OPND_Vd,	/* AdvSIMD Vector Vd.  */
507   AARCH64_OPND_Vn,	/* AdvSIMD Vector Vn.  */
508   AARCH64_OPND_Vm,	/* AdvSIMD Vector Vm.  */
509   AARCH64_OPND_VdD1,	/* AdvSIMD <Vd>.D[1]; for FMOV only.  */
510   AARCH64_OPND_VnD1,	/* AdvSIMD <Vn>.D[1]; for FMOV only.  */
511   AARCH64_OPND_Ed,	/* AdvSIMD Vector Element Vd.  */
512   AARCH64_OPND_En,	/* AdvSIMD Vector Element Vn.  */
513   AARCH64_OPND_Em,	/* AdvSIMD Vector Element Vm.  */
514   AARCH64_OPND_Em16,	/* AdvSIMD Vector Element Vm restricted to V0 - V15 when
515 			   qualifier is S_H.  */
516   AARCH64_OPND_LVn,	/* AdvSIMD Vector register list used in e.g. TBL.  */
517   AARCH64_OPND_LVt,	/* AdvSIMD Vector register list used in ld/st.  */
518   AARCH64_OPND_LVt_AL,	/* AdvSIMD Vector register list for loading single
519 			   structure to all lanes.  */
520   AARCH64_OPND_LEt,	/* AdvSIMD Vector Element list.  */
521 
522   AARCH64_OPND_CRn,	/* Co-processor register in CRn field.  */
523   AARCH64_OPND_CRm,	/* Co-processor register in CRm field.  */
524 
525   AARCH64_OPND_IDX,	/* AdvSIMD EXT index operand.  */
526   AARCH64_OPND_MASK,	/* AdvSIMD EXT index operand.  */
527   AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
528   AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
529   AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
530   AARCH64_OPND_SIMD_IMM_SFT,	/* AdvSIMD modified immediate with shift.  */
531   AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate.  */
532   AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
533 			   (no encoding).  */
534   AARCH64_OPND_IMM0,	/* Immediate for #0.  */
535   AARCH64_OPND_FPIMM0,	/* Immediate for #0.0.  */
536   AARCH64_OPND_FPIMM,	/* Floating-point Immediate.  */
537   AARCH64_OPND_IMMR,	/* Immediate #<immr> in e.g. BFM.  */
538   AARCH64_OPND_IMMS,	/* Immediate #<imms> in e.g. BFM.  */
539   AARCH64_OPND_WIDTH,	/* Immediate #<width> in e.g. BFI.  */
540   AARCH64_OPND_IMM,	/* Immediate.  */
541   AARCH64_OPND_IMM_2,	/* Immediate.  */
542   AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
543   AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
544   AARCH64_OPND_UIMM4,	/* Unsigned 4-bit immediate in the CRm field.  */
545   AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg.  */
546   AARCH64_OPND_UIMM7,	/* Unsigned 7-bit immediate in the CRm:op2 fields.  */
547   AARCH64_OPND_UIMM10,	/* Unsigned 10-bit immediate in addg/subg.  */
548   AARCH64_OPND_BIT_NUM,	/* Immediate.  */
549   AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
550   AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
551   AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
552   AARCH64_OPND_SIMM5,	/* 5-bit signed immediate in the imm5 field.  */
553   AARCH64_OPND_NZCV,	/* Flag bit specifier giving an alternative value for
554 			   each condition flag.  */
555 
556   AARCH64_OPND_LIMM,	/* Logical Immediate.  */
557   AARCH64_OPND_AIMM,	/* Arithmetic immediate.  */
558   AARCH64_OPND_HALF,	/* #<imm16>{, LSL #<shift>} operand in move wide.  */
559   AARCH64_OPND_FBITS,	/* FP #<fbits> operand in e.g. SCVTF */
560   AARCH64_OPND_IMM_MOV,	/* Immediate operand for the MOV alias.  */
561   AARCH64_OPND_IMM_ROT1,	/* Immediate rotate operand for FCMLA.  */
562   AARCH64_OPND_IMM_ROT2,	/* Immediate rotate operand for indexed FCMLA.  */
563   AARCH64_OPND_IMM_ROT3,	/* Immediate rotate operand for FCADD.  */
564 
565   AARCH64_OPND_COND,	/* Standard condition as the last operand.  */
566   AARCH64_OPND_COND1,	/* Same as the above, but excluding AL and NV.  */
567 
568   AARCH64_OPND_ADDR_ADRP,	/* Memory address for ADRP */
569   AARCH64_OPND_ADDR_PCREL14,	/* 14-bit PC-relative address for e.g. TBZ.  */
570   AARCH64_OPND_ADDR_PCREL19,	/* 19-bit PC-relative address for e.g. LDR.  */
571   AARCH64_OPND_ADDR_PCREL21,	/* 21-bit PC-relative address for e.g. ADR.  */
572   AARCH64_OPND_ADDR_PCREL26,	/* 26-bit PC-relative address for e.g. BL.  */
573 
574   AARCH64_OPND_ADDR_SIMPLE,	/* Address of ld/st exclusive.  */
575   AARCH64_OPND_ADDR_REGOFF,	/* Address of register offset.  */
576   AARCH64_OPND_ADDR_SIMM7,	/* Address of signed 7-bit immediate.  */
577   AARCH64_OPND_ADDR_SIMM9,	/* Address of signed 9-bit immediate.  */
578   AARCH64_OPND_ADDR_SIMM9_2,	/* Same as the above, but the immediate is
579 				   negative or unaligned and there is
580 				   no writeback allowed.  This operand code
581 				   is only used to support the programmer-
582 				   friendly feature of using LDR/STR as the
583 				   mnemonic name for LDUR/STUR instructions
584 				   wherever there is no ambiguity.  */
585   AARCH64_OPND_ADDR_SIMM10,	/* Address of signed 10-bit immediate.  */
586   AARCH64_OPND_ADDR_SIMM11,	/* Address with a signed 11-bit (multiple of
587 				   16) immediate.  */
588   AARCH64_OPND_ADDR_UIMM12,	/* Address of unsigned 12-bit immediate.  */
589   AARCH64_OPND_ADDR_SIMM13,	/* Address with a signed 13-bit (multiple of
590 				   16) immediate.  */
591   AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
592   AARCH64_OPND_ADDR_OFFSET,     /* Address with an optional 9-bit immediate.  */
593   AARCH64_OPND_SIMD_ADDR_POST,	/* Address of ld/st multiple post-indexed.  */
594 
595   AARCH64_OPND_SYSREG,		/* System register operand.  */
596   AARCH64_OPND_SYSREG128,	/* 128-bit system register operand.  */
597   AARCH64_OPND_PSTATEFIELD,	/* PSTATE field name operand.  */
598   AARCH64_OPND_SYSREG_AT,	/* System register <at_op> operand.  */
599   AARCH64_OPND_SYSREG_DC,	/* System register <dc_op> operand.  */
600   AARCH64_OPND_SYSREG_IC,	/* System register <ic_op> operand.  */
601   AARCH64_OPND_SYSREG_TLBI,	/* System register <tlbi_op> operand.  */
602   AARCH64_OPND_SYSREG_TLBIP,	/* System register <tlbip_op> operand.  */
603   AARCH64_OPND_SYSREG_SR,	/* System register RCTX operand.  */
604   AARCH64_OPND_BARRIER,		/* Barrier operand.  */
605   AARCH64_OPND_BARRIER_DSB_NXS,	/* Barrier operand for DSB nXS variant.  */
606   AARCH64_OPND_BARRIER_ISB,	/* Barrier operand for ISB.  */
607   AARCH64_OPND_PRFOP,		/* Prefetch operation.  */
608   AARCH64_OPND_RPRFMOP,		/* Range prefetch operation.  */
609   AARCH64_OPND_BARRIER_PSB,	/* Barrier operand for PSB.  */
610   AARCH64_OPND_BARRIER_GCSB,	/* Barrier operand for GCSB.  */
611   AARCH64_OPND_BTI_TARGET,	/* BTI {<target>}.  */
612   AARCH64_OPND_LSE128_Rt,	/* LSE128 <Xt1>.  */
613   AARCH64_OPND_LSE128_Rt2,	/* LSE128 <Xt2>.  */
614   AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
615   AARCH64_OPND_SVE_ADDR_RI_S4x32,   /* SVE [<Xn|SP>, #<simm4>*32].  */
616   AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
617   AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
618   AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
619   AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
620   AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
621   AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
622   AARCH64_OPND_SVE_ADDR_RI_U6,	    /* SVE [<Xn|SP>, #<uimm6>].  */
623   AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
624   AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
625   AARCH64_OPND_SVE_ADDR_RI_U6x8,    /* SVE [<Xn|SP>, #<uimm6>*8].  */
626   AARCH64_OPND_SVE_ADDR_R,	    /* SVE [<Xn|SP>].  */
627   AARCH64_OPND_SVE_ADDR_RR,	    /* SVE [<Xn|SP>, <Xm|XZR>].  */
628   AARCH64_OPND_SVE_ADDR_RR_LSL1,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
629   AARCH64_OPND_SVE_ADDR_RR_LSL2,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
630   AARCH64_OPND_SVE_ADDR_RR_LSL3,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
631   AARCH64_OPND_SVE_ADDR_RR_LSL4,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
632   AARCH64_OPND_SVE_ADDR_RX,	    /* SVE [<Xn|SP>, <Xm>].  */
633   AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
634   AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
635   AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
636   AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
637   AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
638   AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
639   AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
640   AARCH64_OPND_SVE_ADDR_RZ_LSL3,    /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
641   AARCH64_OPND_SVE_ADDR_RZ_XTW_14,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
642 				       Bit 14 controls S/U choice.  */
643   AARCH64_OPND_SVE_ADDR_RZ_XTW_22,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
644 				       Bit 22 controls S/U choice.  */
645   AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
646 				       Bit 14 controls S/U choice.  */
647   AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
648 				       Bit 22 controls S/U choice.  */
649   AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
650 				       Bit 14 controls S/U choice.  */
651   AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
652 				       Bit 22 controls S/U choice.  */
653   AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
654 				       Bit 14 controls S/U choice.  */
655   AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
656 				       Bit 22 controls S/U choice.  */
657   AARCH64_OPND_SVE_ADDR_ZI_U5,	    /* SVE [Zn.<T>, #<uimm5>].  */
658   AARCH64_OPND_SVE_ADDR_ZI_U5x2,    /* SVE [Zn.<T>, #<uimm5>*2].  */
659   AARCH64_OPND_SVE_ADDR_ZI_U5x4,    /* SVE [Zn.<T>, #<uimm5>*4].  */
660   AARCH64_OPND_SVE_ADDR_ZI_U5x8,    /* SVE [Zn.<T>, #<uimm5>*8].  */
661   AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>].  */
662   AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>].  */
663   AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>].  */
664   AARCH64_OPND_SVE_AIMM,	/* SVE unsigned arithmetic immediate.  */
665   AARCH64_OPND_SVE_ASIMM,	/* SVE signed arithmetic immediate.  */
666   AARCH64_OPND_SVE_FPIMM8,	/* SVE 8-bit floating-point immediate.  */
667   AARCH64_OPND_SVE_I1_HALF_ONE,	/* SVE choice between 0.5 and 1.0.  */
668   AARCH64_OPND_SVE_I1_HALF_TWO,	/* SVE choice between 0.5 and 2.0.  */
669   AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
670   AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
671   AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
672   AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
673   AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
674   AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
675   AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
676   AARCH64_OPND_SVE_PATTERN,	/* SVE vector pattern enumeration.  */
677   AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
678   AARCH64_OPND_SVE_PRFOP,	/* SVE prefetch operation.  */
679   AARCH64_OPND_SVE_Pd,		/* SVE p0-p15 in Pd.  */
680   AARCH64_OPND_SVE_PNd,		/* SVE pn0-pn15 in Pd.  */
681   AARCH64_OPND_SVE_Pg3,		/* SVE p0-p7 in Pg.  */
682   AARCH64_OPND_SVE_Pg4_5,	/* SVE p0-p15 in Pg, bits [8,5].  */
683   AARCH64_OPND_SVE_Pg4_10,	/* SVE p0-p15 in Pg, bits [13,10].  */
684   AARCH64_OPND_SVE_PNg4_10,	/* SVE pn0-pn15 in Pg, bits [13,10].  */
685   AARCH64_OPND_SVE_Pg4_16,	/* SVE p0-p15 in Pg, bits [19,16].  */
686   AARCH64_OPND_SVE_Pm,		/* SVE p0-p15 in Pm.  */
687   AARCH64_OPND_SVE_Pn,		/* SVE p0-p15 in Pn.  */
688   AARCH64_OPND_SVE_PNn,		/* SVE pn0-pn15 in Pn.  */
689   AARCH64_OPND_SVE_Pt,		/* SVE p0-p15 in Pt.  */
690   AARCH64_OPND_SVE_PNt,		/* SVE pn0-pn15 in Pt.  */
691   AARCH64_OPND_SVE_Rm,		/* Integer Rm or ZR, alt. SVE position.  */
692   AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
693   AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
694   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
695   AARCH64_OPND_SVE_SHLIMM_UNPRED_22,	/* SVE 3 bit shift left unpred.  */
696   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
697   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
698   AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
699   AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
700   AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
701   AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
702   AARCH64_OPND_SVE_SIMM8,	/* SVE signed 8-bit immediate.  */
703   AARCH64_OPND_SVE_UIMM3,	/* SVE unsigned 3-bit immediate.  */
704   AARCH64_OPND_SVE_UIMM7,	/* SVE unsigned 7-bit immediate.  */
705   AARCH64_OPND_SVE_UIMM8,	/* SVE unsigned 8-bit immediate.  */
706   AARCH64_OPND_SVE_UIMM8_53,	/* SVE split unsigned 8-bit immediate.  */
707   AARCH64_OPND_SVE_VZn,		/* Scalar SIMD&FP register in Zn field.  */
708   AARCH64_OPND_SVE_Vd,		/* Scalar SIMD&FP register in Vd.  */
709   AARCH64_OPND_SVE_Vm,		/* Scalar SIMD&FP register in Vm.  */
710   AARCH64_OPND_SVE_Vn,		/* Scalar SIMD&FP register in Vn.  */
711   AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B).  */
712   AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H).  */
713   AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S).  */
714   AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D).  */
715   AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B).  */
716   AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H).  */
717   AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
718   AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D).  */
719   AARCH64_OPND_SVE_Za_5,	/* SVE vector register in Za, bits [9,5].  */
720   AARCH64_OPND_SVE_Za_16,	/* SVE vector register in Za, bits [20,16].  */
721   AARCH64_OPND_SVE_Zd,		/* SVE vector register in Zd.  */
722   AARCH64_OPND_SVE_Zm_5,	/* SVE vector register in Zm, bits [9,5].  */
723   AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
724   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
725   AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
726   AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19.  */
727   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
728   AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
729   AARCH64_OPND_SVE_Zm_imm4,     /* SVE vector register with 4bit index.  */
730   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
731   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
732   AARCH64_OPND_SVE_Zn_5_INDEX,	/* Indexed SVE vector register, for DUPQ.  */
733   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
734   AARCH64_OPND_SVE_ZnxN,	/* SVE vector register list in Zn.  */
735   AARCH64_OPND_SVE_Zt,		/* SVE vector register in Zt.  */
736   AARCH64_OPND_SVE_ZtxN,	/* SVE vector register list in Zt.  */
737   AARCH64_OPND_SME_Zdnx2,	/* SVE vector register list from [4:1]*2.  */
738   AARCH64_OPND_SME_Zdnx4,	/* SVE vector register list from [4:2]*4.  */
739   AARCH64_OPND_SME_Zm,		/* SVE vector register list in 4-bit Zm.  */
740   AARCH64_OPND_SME_Zmx2,	/* SVE vector register list from [20:17]*2.  */
741   AARCH64_OPND_SME_Zmx4,	/* SVE vector register list from [20:18]*4.  */
742   AARCH64_OPND_SME_Znx2,	/* SVE vector register list from [9:6]*2.  */
743   AARCH64_OPND_SME_Znx4,	/* SVE vector register list from [9:7]*4.  */
744   AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23.  */
745   AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19.  */
746   AARCH64_OPND_SME_ZAda_2b,	/* SME <ZAda>.S, 2-bits.  */
747   AARCH64_OPND_SME_ZAda_3b,	/* SME <ZAda>.D, 3-bits.  */
748   AARCH64_OPND_SME_ZA_HV_idx_src,	/* SME source ZA tile vector.  */
749   AARCH64_OPND_SME_ZA_HV_idx_srcxN,	/* SME N source ZA tile vectors.  */
750   AARCH64_OPND_SME_ZA_HV_idx_dest,	/* SME destination ZA tile vector.  */
751   AARCH64_OPND_SME_ZA_HV_idx_destxN,	/* SME N dest ZA tile vectors.  */
752   AARCH64_OPND_SME_Pdx2,	/* Predicate register list in [3:1].  */
753   AARCH64_OPND_SME_PdxN,	/* Predicate register list in [3:0].  */
754   AARCH64_OPND_SME_Pm,		/* SME scalable predicate register, bits [15:13].  */
755   AARCH64_OPND_SME_PNd3,	/* Predicate-as-counter register, bits [3:0].  */
756   AARCH64_OPND_SME_PNg3,	/* Predicate-as-counter register, bits [12:10].  */
757   AARCH64_OPND_SME_PNn,		/* Predicate-as-counter register, bits [8:5].  */
758   AARCH64_OPND_SME_PNn3_INDEX1,	/* Indexed pred-as-counter reg, bits [8:5].  */
759   AARCH64_OPND_SME_PNn3_INDEX2,	/* Indexed pred-as-counter reg, bits [9:5].  */
760   AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
761   AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector.  */
762   AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3].  */
763   AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1].  */
764   AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3].  */
765   AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}].  */
766   AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}].  */
767   AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1].  */
768   AARCH64_OPND_SME_ZA_array_off4,   /* SME ZA[<Wv>{, #<imm>}].  */
769   AARCH64_OPND_SME_ADDR_RI_U4xVL,   /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
770   AARCH64_OPND_SME_SM_ZA,           /* SME {SM | ZA}.  */
771   AARCH64_OPND_SME_PnT_Wm_imm,      /* SME <Pn>.<T>[<Wm>, #<imm>].  */
772   AARCH64_OPND_SME_SHRIMM4,	    /* 4-bit right shift, bits [19:16].  */
773   AARCH64_OPND_SME_SHRIMM5,	    /* size + 5-bit right shift, bits [23:22,20:16].  */
774   AARCH64_OPND_SME_Zm_INDEX1,	    /* Zn.T[index], bits [19:16,10].  */
775   AARCH64_OPND_SME_Zm_INDEX2,	    /* Zn.T[index], bits [19:16,11:10].  */
776   AARCH64_OPND_SME_Zm_INDEX3_1,     /* Zn.T[index], bits [19:16,10,2:1].  */
777   AARCH64_OPND_SME_Zm_INDEX3_2,     /* Zn.T[index], bits [19:16,11:10,2].  */
778   AARCH64_OPND_SME_Zm_INDEX3_10,    /* Zn.T[index], bits [19:16,15,11:10].  */
779   AARCH64_OPND_SME_Zm_INDEX4_1,     /* Zn.T[index], bits [19:16,11:10,2:1].  */
780   AARCH64_OPND_SME_Zm_INDEX4_10,    /* Zn.T[index], bits [19:16,15,12:10].  */
781   AARCH64_OPND_SME_Zn_INDEX1_16,    /* Zn[index], bits [9:5] and [16:16].  */
782   AARCH64_OPND_SME_Zn_INDEX2_15,    /* Zn[index], bits [9:5] and [16:15].  */
783   AARCH64_OPND_SME_Zn_INDEX2_16,    /* Zn[index], bits [9:5] and [17:16].  */
784   AARCH64_OPND_SME_Zn_INDEX3_14,    /* Zn[index], bits [9:5] and [16:14].  */
785   AARCH64_OPND_SME_Zn_INDEX3_15,    /* Zn[index], bits [9:5] and [17:15].  */
786   AARCH64_OPND_SME_Zn_INDEX4_14,    /* Zn[index], bits [9:5] and [17:14].  */
787   AARCH64_OPND_SME_VLxN_10,	/* VLx2 or VLx4, in bit 10.  */
788   AARCH64_OPND_SME_VLxN_13,	/* VLx2 or VLx4, in bit 13.  */
789   AARCH64_OPND_SME_ZT0,		/* The fixed token zt0/ZT0 (not encoded).  */
790   AARCH64_OPND_SME_ZT0_INDEX,	/* ZT0[<imm>], bits [14:12].  */
791   AARCH64_OPND_SME_ZT0_LIST,	/* { zt0/ZT0 } (not encoded).  */
792   AARCH64_OPND_TME_UIMM16,	/* TME unsigned 16-bit immediate.  */
793   AARCH64_OPND_SM3_IMM2,	/* SM3 encodes lane in bits [13, 14].  */
794   AARCH64_OPND_MOPS_ADDR_Rd,	/* [Rd]!, in bits [0, 4].  */
795   AARCH64_OPND_MOPS_ADDR_Rs,	/* [Rs]!, in bits [16, 20].  */
796   AARCH64_OPND_MOPS_WB_Rn,	/* Rn!, in bits [5, 9].  */
797   AARCH64_OPND_CSSC_SIMM8,	/* CSSC signed 8-bit immediate.  */
798   AARCH64_OPND_CSSC_UIMM8,	/* CSSC unsigned 8-bit immediate.  */
  AARCH64_OPND_SME_Zt2,		/* Double SVE vector register list.  */
  AARCH64_OPND_SME_Zt3,		/* Triple SVE vector register list.  */
801   AARCH64_OPND_SME_Zt4,		/* Quad SVE vector register list.  */
802   AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND,   /* [<Xn|SP>]{, #<imm>}.  */
803   AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!.  */
804   AARCH64_OPND_RCPC3_ADDR_POSTIND,	 /* [<Xn|SP>], #<imm>.  */
805   AARCH64_OPND_RCPC3_ADDR_PREIND_WB, 	 /* [<Xn|SP>, #<imm>]!.  */
806   AARCH64_OPND_RCPC3_ADDR_OFFSET
807 };
808 
/* Qualifier constrains an operand.  It either specifies a variant of an
   operand type or limits values available to an operand type.

   N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */

enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception from the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* Predicate register qualifiers (presumably SVE /Z zeroing and /M
     merging predication -- confirm against aarch64_opnd_qualifiers).  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.   */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm. */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,
};
893 
/* Instruction class.

   Groups opcodes that share encoding/decoding rules; stored in the
   `iclass' field of struct aarch64_opcode.  Prefix conventions:
   "asimd" = Advanced SIMD (vector), "asisd" = Advanced SIMD scalar,
   "sve"/"sme" = Scalable Vector/Matrix Extension classes.  */

enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  sme2_movaz,
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
  the,
  sve2_urqvs,
  sve_index1,
  rcpc3
};
1018 
/* Opcode enumerators.

   Identifies individual opcodes (stored in the `op' field of struct
   aarch64_opcode) so that opcode-specific encode/decode handling, such
   as alias conversion (see F_CONV), can dispatch on them.  OP_NIL is
   used where no specific identity is needed.  */

enum aarch64_op
{
  OP_NIL,
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo.  */
};
1123 
/* Error types returned by instruction verifiers (see the `verifier'
   field of struct aarch64_opcode).  */
enum err_type
{
  ERR_OK,	/* No error.  */
  ERR_UND,	/* Presumably "undefined" -- TODO confirm.  */
  ERR_UNP,	/* Presumably "unpredictable" -- TODO confirm.  */
  ERR_NYI,	/* Presumably "not yet implemented" -- TODO confirm.  */
  ERR_VFI,	/* Verifier-detected failure.  */
  ERR_NR_ENTRIES /* Number of entries; keep last.  */
};
1134 
1135 /* Maximum number of operands an instruction can have.  */
1136 #define AARCH64_MAX_OPND_NUM 7
1137 /* Maximum number of qualifier sequences an instruction can have.  */
1138 #define AARCH64_MAX_QLF_SEQ_NUM 10
1139 /* Operand qualifier typedef; optimized for the size.  */
1140 typedef unsigned char aarch64_opnd_qualifier_t;
1141 /* Operand qualifier sequence typedef.  */
1142 typedef aarch64_opnd_qualifier_t	\
1143 	  aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1144 
1145 /* FIXME: improve the efficiency.  */
1146 static inline bool
empty_qualifier_sequence_p(const aarch64_opnd_qualifier_t * qualifiers)1147 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1148 {
1149   int i;
1150   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1151     if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1152       return false;
1153   return true;
1154 }
1155 
1156 /*  Forward declare error reporting type.  */
1157 typedef struct aarch64_operand_error aarch64_operand_error;
1158 /* Forward declare instruction sequence type.  */
1159 typedef struct aarch64_instr_sequence aarch64_instr_sequence;
1160 /* Forward declare instruction definition.  */
1161 typedef struct aarch64_inst aarch64_inst;
1162 
/* This structure holds information for a particular opcode.  */

struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (F_* values
     defined below).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks
     (C_* values defined below).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
1215 
1216 typedef struct aarch64_opcode aarch64_opcode;
1217 
1218 /* Table describing all the AArch64 opcodes.  */
1219 extern const aarch64_opcode aarch64_opcode_table[];
1220 
1221 /* Opcode flags.  */
1222 #define F_ALIAS (1 << 0)
1223 #define F_HAS_ALIAS (1 << 1)
1224 /* Disassembly preference priority 1-3 (the larger the higher).  If nothing
1225    is specified, it is the priority 0 by default, i.e. the lowest priority.  */
1226 #define F_P1 (1 << 2)
1227 #define F_P2 (2 << 2)
1228 #define F_P3 (3 << 2)
1229 /* Flag an instruction that is truly conditional executed, e.g. b.cond.  */
1230 #define F_COND (1 << 4)
1231 /* Instruction has the field of 'sf'.  */
1232 #define F_SF (1 << 5)
1233 /* Instruction has the field of 'size:Q'.  */
1234 #define F_SIZEQ (1 << 6)
1235 /* Floating-point instruction has the field of 'type'.  */
1236 #define F_FPTYPE (1 << 7)
1237 /* AdvSIMD scalar instruction has the field of 'size'.  */
1238 #define F_SSIZE (1 << 8)
1239 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
1240 #define F_T (1 << 9)
1241 /* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
1242 #define F_GPRSIZE_IN_Q (1 << 10)
1243 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
1244 #define F_LDS_SIZE (1 << 11)
1245 /* Optional operand; assume maximum of 1 operand can be optional.  */
1246 #define F_OPD0_OPT (1 << 12)
1247 #define F_OPD1_OPT (2 << 12)
1248 #define F_OPD2_OPT (3 << 12)
1249 #define F_OPD3_OPT (4 << 12)
1250 #define F_OPD4_OPT (5 << 12)
1251 /* Default value for the optional operand when omitted from the assembly.  */
1252 #define F_DEFAULT(X) (((X) & 0x1f) << 15)
1253 /* Instruction that is an alias of another instruction needs to be
1254    encoded/decoded by converting it to/from the real form, followed by
1255    the encoding/decoding according to the rules of the real opcode.
1256    This compares to the direct coding using the alias's information.
1257    N.B. this flag requires F_ALIAS to be used together.  */
1258 #define F_CONV (1 << 20)
1259 /* Use together with F_ALIAS to indicate an alias opcode is a programmer
1260    friendly pseudo instruction available only in the assembly code (thus will
1261    not show up in the disassembly).  */
1262 #define F_PSEUDO (1 << 21)
1263 /* Instruction has miscellaneous encoding/decoding rules.  */
1264 #define F_MISC (1 << 22)
1265 /* Instruction has the field of 'N'; used in conjunction with F_SF.  */
1266 #define F_N (1 << 23)
1267 /* Opcode dependent field.  */
1268 #define F_OD(X) (((X) & 0x7) << 24)
1269 /* Instruction has the field of 'sz'.  */
1270 #define F_LSE_SZ (1 << 27)
1271 /* Require an exact qualifier match, even for NIL qualifiers.  */
1272 #define F_STRICT (1ULL << 28)
1273 /* This system instruction is used to read system registers.  */
1274 #define F_SYS_READ (1ULL << 29)
1275 /* This system instruction is used to write system registers.  */
1276 #define F_SYS_WRITE (1ULL << 30)
1277 /* This instruction has an extra constraint on it that imposes a requirement on
1278    subsequent instructions.  */
1279 #define F_SCAN (1ULL << 31)
1280 /* Instruction takes a pair of optional operands.  If we specify the Nth operand
1281    to be optional, then we also implicitly specify (N+1)th operand to also be
1282    optional.  */
1283 #define F_OPD_PAIR_OPT (1ULL << 32)
/* This instruction does not allow the full range of values that the
   width of fields in the assembler instruction would theoretically
   allow.  This impacts the constraints on assembly but yields no
   impact on disassembly.  */
1288 #define F_OPD_NARROW (1ULL << 33)
1289 /* For the instruction with size[22:23] field.  */
1290 #define F_OPD_SIZE (1ULL << 34)
1291 /* RCPC3 instruction has the field of 'size'.  */
1292 #define F_RCPC3_SIZE (1ULL << 35)
1293 /* Next bit is 36.  */
1294 
1295 /* Instruction constraints.  */
1296 /* This instruction has a predication constraint on the instruction at PC+4.  */
1297 #define C_SCAN_MOVPRFX (1U << 0)
1298 /* This instruction's operation width is determined by the operand with the
1299    largest element size.  */
1300 #define C_MAX_ELEM (1U << 1)
1301 #define C_SCAN_MOPS_P (1U << 2)
1302 #define C_SCAN_MOPS_M (2U << 2)
1303 #define C_SCAN_MOPS_E (3U << 2)
1304 #define C_SCAN_MOPS_PME (3U << 2)
1305 /* Next bit is 4.  */
1306 
1307 static inline bool
alias_opcode_p(const aarch64_opcode * opcode)1308 alias_opcode_p (const aarch64_opcode *opcode)
1309 {
1310   return (opcode->flags & F_ALIAS) != 0;
1311 }
1312 
1313 static inline bool
opcode_has_alias(const aarch64_opcode * opcode)1314 opcode_has_alias (const aarch64_opcode *opcode)
1315 {
1316   return (opcode->flags & F_HAS_ALIAS) != 0;
1317 }
1318 
1319 /* Priority for disassembling preference.  */
1320 static inline int
opcode_priority(const aarch64_opcode * opcode)1321 opcode_priority (const aarch64_opcode *opcode)
1322 {
1323   return (opcode->flags >> 2) & 0x3;
1324 }
1325 
1326 static inline bool
pseudo_opcode_p(const aarch64_opcode * opcode)1327 pseudo_opcode_p (const aarch64_opcode *opcode)
1328 {
1329   return (opcode->flags & F_PSEUDO) != 0lu;
1330 }
1331 
1332 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case
1333    by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range
1334    [IDX, IDX + 1].  */
1335 static inline bool
optional_operand_p(const aarch64_opcode * opcode,unsigned int idx)1336 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1337 {
1338   if (opcode->flags & F_OPD_PAIR_OPT)
1339     return (((opcode->flags >> 12) & 0x7) == idx
1340 	    || ((opcode->flags >> 12) & 0x7) == idx + 1);
1341   return ((opcode->flags >> 12) & 0x7) == idx + 1;
1342 }
1343 
1344 static inline aarch64_insn
get_optional_operand_default_value(const aarch64_opcode * opcode)1345 get_optional_operand_default_value (const aarch64_opcode *opcode)
1346 {
1347   return (opcode->flags >> 15) & 0x1f;
1348 }
1349 
1350 static inline unsigned int
get_opcode_dependent_value(const aarch64_opcode * opcode)1351 get_opcode_dependent_value (const aarch64_opcode *opcode)
1352 {
1353   return (opcode->flags >> 24) & 0x7;
1354 }
1355 
1356 static inline bool
opcode_has_special_coder(const aarch64_opcode * opcode)1357 opcode_has_special_coder (const aarch64_opcode *opcode)
1358 {
1359   return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1360 	  | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND
1361 	  | F_OPD_SIZE | F_RCPC3_SIZE)) != 0;
1362 }
1363 
/* A name/encoding pair; used for tables of operand modifiers, barrier
   options, prefetch operations and hint options (see the extern tables
   declared below).  */
struct aarch64_name_value_pair
{
  const char *  name;
  aarch64_insn	value;
};
1369 
1370 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
1371 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
1372 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
1373 extern const struct aarch64_name_value_pair aarch64_prfops [32];
1374 extern const struct aarch64_name_value_pair aarch64_hint_options [];
1375 
1376 #define AARCH64_MAX_SYSREG_NAME_LEN 32
1377 
/* Description of a system register: its name, encoding, flags, and the
   features required for it to be available.  */
typedef struct
{
  const char *  name;
  aarch64_insn	value;
  uint32_t	flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
1388 
1389 extern const aarch64_sys_reg aarch64_sys_regs [];
1390 extern const aarch64_sys_reg aarch64_pstatefields [];
1391 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
1392 extern bool aarch64_sys_reg_128bit_p (const uint32_t);
1393 extern bool aarch64_sys_reg_alias_p (const uint32_t);
1394 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
1395 					     const aarch64_sys_reg *);
1396 
/* Description of a system-instruction operand (IC/DC/AT/TLBI etc., see
   the tables declared below): name, encoding, flags, and the features
   required for it to be available.  */
typedef struct
{
  const char *name;
  uint32_t value;
  uint32_t flags ;

  /* A set of features, all of which are required for this system instruction to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_ins_reg;
1407 
1408 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
1409 extern bool
1410 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
1411 				 const char *reg_name,
1412 				 uint32_t, const aarch64_feature_set *);
1413 
1414 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
1415 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
1416 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
1417 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
1418 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1419 
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,	/* No modifier present.  */
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
1441 
1442 bool
1443 aarch64_extend_operator_p (enum aarch64_modifier_kind);
1444 
1445 enum aarch64_modifier_kind
1446 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
1447 /* Condition.  */
1448 
typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;
1456 
1457 extern const aarch64_cond aarch64_conds[16];
1458 
1459 const aarch64_cond* get_cond_from_value (aarch64_insn value);
1460 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1461 
/* Information about a reference to part of ZA (an SME ZA tile or an
   index into the ZA array), e.g. ZA0.S[Wv, #imm].  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The [Wv, #imm] / [Wv, #imm1:imm2] index expression.  */
  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1489 
/* Information about a list of registers, e.g. { Z0.S - Z3.S } or a list
   of vector elements.  */
struct aarch64_reglist
{
  /* Register number of the first register in the list.  */
  unsigned first_regno : 8;
  /* Number of registers in the list.  */
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if it is a list of reg element.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1502 
/* Structure representing an operand.

   TYPE selects which arm of the anonymous union below is active;
   QUALIFIER refines it (e.g. register width / vector arrangement);
   IDX is the operand's position within the instruction.  */

struct aarch64_opnd_info
{
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;
  int idx;

  union
    {
      /* A simple register operand.  */
      struct
	{
	  unsigned regno;
	} reg;
      /* A register with an element (lane) index, e.g. Vn.S[2].  */
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  /* The offset is either an immediate or a register,
	     discriminated by is_reg.  */
	  struct
	    {
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      /* A system register operand.  */
      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
      struct aarch64_indexed_za indexed_za;

      /* A condition-code operand.  */
      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      /* System instruction operand (e.g. for AT/DC/IC/TLBI).  */
      const aarch64_sys_ins_reg *sysins_op;
      /* Named operands looked up from name/value tables.  */
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};
1592 
/* Convenience typedef so clients can omit the 'struct' keyword.  */
typedef struct aarch64_opnd_info aarch64_opnd_info;
1594 
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1619 
/* Defining the HINT #imm values for the aarch64_hint_options.
   (The names suggest hint targets such as BTI C/J/JC and the
   CSYNC/DSYNC synchronisation hints; HINT_OPD_NULL marks the
   absence of a hint operand -- confirm against aarch64-tbl.h.)  */
#define HINT_OPD_CSYNC	0x11
#define HINT_OPD_DSYNC	0x13
#define HINT_OPD_C	0x22
#define HINT_OPD_J	0x24
#define HINT_OPD_JC	0x26
#define HINT_OPD_NULL	0x00
1627 
1628 
1629 /* Diagnosis related declaration and interface.  */
1630 
1631 /* Operand error kind enumerators.
1632 
1633    AARCH64_OPDE_RECOVERABLE
1634      Less severe error found during the parsing, very possibly because that
1635      GAS has picked up a wrong instruction template for the parsing.
1636 
1637    AARCH64_OPDE_A_SHOULD_FOLLOW_B
1638      The instruction forms (or is expected to form) part of a sequence,
1639      but the preceding instruction in the sequence wasn't the expected one.
1640      The message refers to two strings: the name of the current instruction,
1641      followed by the name of the expected preceding instruction.
1642 
1643    AARCH64_OPDE_EXPECTED_A_AFTER_B
1644      Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1645      so that the current instruction is assumed to be the incorrect one:
1646      "since the previous instruction was B, the current one should be A".
1647 
1648    AARCH64_OPDE_SYNTAX_ERROR
1649      General syntax error; it can be either a user error, or simply because
1650      that GAS is trying a wrong instruction template.
1651 
1652    AARCH64_OPDE_FATAL_SYNTAX_ERROR
1653      Definitely a user syntax error.
1654 
1655    AARCH64_OPDE_INVALID_VARIANT
1656      No syntax error, but the operands are not a valid combination, e.g.
1657      FMOV D0,S0
1658 
1659    The following errors are only reported against an asm string that is
1660    syntactically valid and that has valid operand qualifiers.
1661 
1662    AARCH64_OPDE_INVALID_VG_SIZE
1663      Error about a "VGx<n>" modifier in a ZA index not having the
1664      correct <n>.  This error effectively forms a pair with
1665      AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
1666      of vectors that an instruction operates on.  However, the "VGx<n>"
1667      modifier is optional, whereas a register list always has a known
1668      and explicit length.  It therefore seems better to place more
1669      importance on the register list length when selecting an opcode table
1670      entry.  This in turn means that having an incorrect register length
1671      should be more severe than having an incorrect "VGx<n>".
1672 
1673    AARCH64_OPDE_REG_LIST_LENGTH
1674      Error about a register list operand having an unexpected number of
1675      registers.  This error is low severity because there might be another
1676      opcode entry that supports the given number of registers.
1677 
1678    AARCH64_OPDE_REG_LIST_STRIDE
1679      Error about a register list operand having the correct number
1680      (and type) of registers, but an unexpected stride.  This error is
1681      more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
1682      that the length is known to be correct.  However, it is lower than
1683      many other errors, since some instructions have forms that share
1684      the same number of registers but have different strides.
1685 
1686    AARCH64_OPDE_UNTIED_IMMS
1687      The asm failed to use the same immediate for a destination operand
1688      and a tied source operand.
1689 
1690    AARCH64_OPDE_UNTIED_OPERAND
1691      The asm failed to use the same register for a destination operand
1692      and a tied source operand.
1693 
1694    AARCH64_OPDE_OUT_OF_RANGE
1695      Error about some immediate value out of a valid range.
1696 
   AARCH64_OPDE_UNALIGNED
     Error about some immediate value not properly aligned (i.e. not being a
     multiple of a certain value).
1700 
1701    AARCH64_OPDE_OTHER_ERROR
1702      Error of the highest severity and used for any severe issue that does not
1703      fall into any of the above categories.
1704 
1705    AARCH64_OPDE_INVALID_REGNO
1706      A register was syntactically valid and had the right type, but it was
1707      outside the range supported by the associated operand field.  This is
1708      a high severity error because there are currently no instructions that
1709      would accept the operands that precede the erroneous one (if any) and
1710      yet still accept a wider range of registers.
1711 
   AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
   AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
   only libopcodes has the information about the valid variants of each
   instruction.
1717 
1718    The enumerators have an increasing severity.  This is helpful when there are
1719    multiple instruction templates available for a given mnemonic name (e.g.
1720    FMOV); this mechanism will help choose the most suitable template from which
1721    the generated diagnostics can most closely describe the issues, if any.
1722 
1723    This enum needs to be kept up-to-date with operand_mismatch_kind_names
1724    in tc-aarch64.c.  */
1725 
/* The enumerators are ordered by increasing severity; see the comment
   above for the meaning of each value.  Keep this enum in sync with
   operand_mismatch_kind_names in tc-aarch64.c.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
1745 
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  /* The kind (and hence severity) of the diagnostic.  */
  enum aarch64_operand_error_kind kind;
  /* Index of the operand the error refers to (presumably a sentinel
     such as -1 when not operand-specific -- confirm against users).  */
  int index;
  /* The error message text, if any.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* Whether the error is non-fatal.  */
  bool non_fatal;
};
1759 
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  See
   init_insn_sequence below for how a sequence is set up.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
1772 
/* Encoding entrypoint.  Encodes the instruction described by the
   aarch64_inst into an aarch64_insn; diagnostics, if any, are written
   to the aarch64_operand_error argument.  Returns true on success
   (presumably -- confirm in the libopcodes implementation).  */

extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
		       aarch64_insn *, aarch64_opnd_qualifier_t *,
		       aarch64_operand_error *, aarch64_instr_sequence *);

/* Switch the instruction to use the given opcode entry, returning the
   opcode now in effect (see the libopcodes implementation for the
   exact conversion rules).  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
			const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */

extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
1789 
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS (a printf-style format string and its arguments) with STYLE
     applied to the string.  STYLER is a pointer back to this object so
     that the callback can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
1812 
/* Generate the string representation of an operand, applying styling
   through STYLER.  (The meaning of the unnamed parameters is defined
   by the implementation in libopcodes; consult it before changing
   this signature.)  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);
1820 
/* Miscellaneous interface.  */

/* Look up an operand enumerator in an array of operand enumerators.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

/* Whether the opcode has a destructive operand (destination overlaps
   a source).  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

/* Number of operands the opcode takes.  */
extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Predicates on a decoded/parsed operand: stack pointer (SP/WSP) and
   zero register (XZR/WZR) respectively.  */
extern int
aarch64_stack_pointer_p (const aarch64_opnd_info *);

extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode a binary instruction word into an
   aarch64_inst, reporting diagnostics through the error argument.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

/* (Re)initialise an instruction sequence, optionally opened by the
   given instruction.  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

/* Short name and longer description strings for an operand enumerator.  */
extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* Whether the immediate is valid for an SVE DUPM/MOV alias.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Whether a CPU with the given feature set can execute the
   instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);

/* Calculate the data size of a load/store from its operands.  */
extern int
calc_ldst_datasize (const aarch64_opnd_info *opnds);
1871 
#ifdef DEBUG_AARCH64
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Print a trace message when debug_dump is enabled.  Both macros are
   wrapped in do { ... } while (0) so that "DEBUG_TRACE (...);" is a
   single statement in every context (e.g. an unbraced if/else); the
   previous bare-block/bare-semicolon expansions broke such uses.  */
#define DEBUG_TRACE(M, ...)					\
  do								\
    {								\
      if (debug_dump)						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);\
    }								\
  while (0)

/* As DEBUG_TRACE, but only print when condition C also holds.  */
#define DEBUG_TRACE_IF(C, M, ...)				\
  do								\
    {								\
      if (debug_dump && (C))					\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);\
    }								\
  while (0)
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) do {} while (0)
#define DEBUG_TRACE_IF(C, M, ...) do {} while (0)
#endif /* DEBUG_AARCH64 */
1893 
/* Textual names indexed by encoded field value: SVE predicate patterns
   (32 entries, i.e. a 5-bit field), SVE prefetch operations (4-bit),
   RPRFM prefetch operations (6-bit) and SME VLx multipliers.  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
1898 
1899 #ifdef __cplusplus
1900 }
1901 #endif
1902 
1903 #endif /* OPCODE_AARCH64_H */
1904