1 /* AArch64 assembler/disassembler support.
2 
3    Copyright (C) 2009-2022 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GNU Binutils.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the license, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24 
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 /* The offset for pc-relative addressing is currently defined to be 0.  */
35 #define AARCH64_PCREL_OFFSET		0
36 
37 typedef uint32_t aarch64_insn;
38 
39 /* The following bitmasks control CPU features.  */
40 #define AARCH64_FEATURE_V8	     (1ULL << 0) /* All processors.  */
41 #define AARCH64_FEATURE_V8_6	     (1ULL << 1) /* ARMv8.6 processors.  */
42 #define AARCH64_FEATURE_BFLOAT16     (1ULL << 2) /* Bfloat16 insns.  */
43 #define AARCH64_FEATURE_V8_A	     (1ULL << 3) /* Armv8-A processors.  */
44 #define AARCH64_FEATURE_SVE2	     (1ULL << 4) /* SVE2 instructions.  */
45 #define AARCH64_FEATURE_V8_2	     (1ULL << 5) /* ARMv8.2 processors.  */
46 #define AARCH64_FEATURE_V8_3	     (1ULL << 6) /* ARMv8.3 processors.  */
47 #define AARCH64_FEATURE_SVE2_AES     (1ULL << 7)
48 #define AARCH64_FEATURE_SVE2_BITPERM (1ULL << 8)
49 #define AARCH64_FEATURE_SVE2_SM4     (1ULL << 9)
50 #define AARCH64_FEATURE_SVE2_SHA3    (1ULL << 10)
51 #define AARCH64_FEATURE_V8_4	     (1ULL << 11) /* ARMv8.4 processors.  */
52 #define AARCH64_FEATURE_V8_R	     (1ULL << 12) /* Armv8-R processors.  */
53 #define AARCH64_FEATURE_V8_7	     (1ULL << 13) /* Armv8.7 processors.  */
54 #define AARCH64_FEATURE_SME	     (1ULL << 14) /* Scalable Matrix Extension.  */
55 #define AARCH64_FEATURE_LS64	     (1ULL << 15) /* Atomic 64-byte load/store.  */
56 #define AARCH64_FEATURE_PAC	     (1ULL << 16) /* v8.3 Pointer Authentication.  */
57 #define AARCH64_FEATURE_FP	     (1ULL << 17) /* FP instructions.  */
58 #define AARCH64_FEATURE_SIMD	     (1ULL << 18) /* SIMD instructions.  */
59 #define AARCH64_FEATURE_CRC	     (1ULL << 19) /* CRC instructions.  */
60 #define AARCH64_FEATURE_LSE	     (1ULL << 20) /* LSE instructions.  */
61 #define AARCH64_FEATURE_PAN	     (1ULL << 21) /* PAN instructions.  */
62 #define AARCH64_FEATURE_LOR	     (1ULL << 22) /* LOR instructions.  */
63 #define AARCH64_FEATURE_RDMA	     (1ULL << 23) /* v8.1 SIMD instructions.  */
64 #define AARCH64_FEATURE_V8_1	     (1ULL << 24) /* v8.1 features.  */
65 #define AARCH64_FEATURE_F16	     (1ULL << 25) /* v8.2 FP16 instructions.  */
66 #define AARCH64_FEATURE_RAS	     (1ULL << 26) /* RAS Extensions.  */
67 #define AARCH64_FEATURE_PROFILE      (1ULL << 27) /* Statistical Profiling.  */
68 #define AARCH64_FEATURE_SVE	     (1ULL << 28) /* SVE instructions.  */
69 #define AARCH64_FEATURE_RCPC	     (1ULL << 29) /* RCPC instructions.  */
70 #define AARCH64_FEATURE_COMPNUM      (1ULL << 30) /* Complex # instructions.  */
71 #define AARCH64_FEATURE_DOTPROD      (1ULL << 31) /* Dot Product instructions.  */
72 #define AARCH64_FEATURE_SM4	     (1ULL << 32) /* SM3 & SM4 instructions.  */
73 #define AARCH64_FEATURE_SHA2	     (1ULL << 33) /* SHA2 instructions.  */
74 #define AARCH64_FEATURE_SHA3	     (1ULL << 34) /* SHA3 instructions.  */
75 #define AARCH64_FEATURE_AES	     (1ULL << 35) /* AES instructions.  */
76 #define AARCH64_FEATURE_F16_FML      (1ULL << 36) /* v8.2 FP16FML ins.  */
77 #define AARCH64_FEATURE_V8_5	     (1ULL << 37) /* ARMv8.5 processors.  */
78 #define AARCH64_FEATURE_FLAGMANIP    (1ULL << 38) /* v8.5 Flag Manipulation version 2.  */
79 #define AARCH64_FEATURE_FRINTTS      (1ULL << 39) /* FRINT[32,64][Z,X] insns.  */
80 #define AARCH64_FEATURE_SB	     (1ULL << 40) /* SB instruction.  */
81 #define AARCH64_FEATURE_PREDRES      (1ULL << 41) /* Execution and Data Prediction Restriction instructions.  */
82 #define AARCH64_FEATURE_CVADP	     (1ULL << 42) /* DC CVADP.  */
83 #define AARCH64_FEATURE_RNG	     (1ULL << 43) /* Random Number instructions.  */
84 #define AARCH64_FEATURE_BTI	     (1ULL << 44) /* BTI instructions.  */
85 #define AARCH64_FEATURE_SCXTNUM      (1ULL << 45) /* SCXTNUM_ELx.  */
86 #define AARCH64_FEATURE_ID_PFR2      (1ULL << 46) /* ID_PFR2 instructions.  */
87 #define AARCH64_FEATURE_SSBS	     (1ULL << 47) /* SSBS mechanism enabled.  */
88 #define AARCH64_FEATURE_MEMTAG       (1ULL << 48) /* Memory Tagging Extension.  */
89 #define AARCH64_FEATURE_TME	     (1ULL << 49) /* Transactional Memory Extension.  */
90 #define AARCH64_FEATURE_MOPS	     (1ULL << 50) /* Standardization of memory operations.  */
91 #define AARCH64_FEATURE_HBC	     (1ULL << 51) /* Hinted conditional branches.  */
92 #define AARCH64_FEATURE_I8MM	     (1ULL << 52) /* Matrix Multiply instructions.  */
93 #define AARCH64_FEATURE_F32MM	     (1ULL << 53)
94 #define AARCH64_FEATURE_F64MM	     (1ULL << 54)
95 #define AARCH64_FEATURE_FLAGM	     (1ULL << 55) /* v8.4 Flag Manipulation.  */
96 #define AARCH64_FEATURE_V9	     (1ULL << 56) /* Armv9.0-A processors.  */
97 #define AARCH64_FEATURE_SME_F64	     (1ULL << 57) /* SME F64.  */
98 #define AARCH64_FEATURE_SME_I64	     (1ULL << 58) /* SME I64.  */
99 #define AARCH64_FEATURE_V8_8	     (1ULL << 59) /* Armv8.8 processors.  */
100 
101 /* Crypto instructions are the combination of AES and SHA2.  */
102 #define AARCH64_FEATURE_CRYPTO	(AARCH64_FEATURE_SHA2 | AARCH64_FEATURE_AES)
103 
104 #define AARCH64_ARCH_V8_FEATURES	(AARCH64_FEATURE_V8_A		\
105 					 | AARCH64_FEATURE_FP		\
106 					 | AARCH64_FEATURE_RAS		\
107 					 | AARCH64_FEATURE_SIMD)
108 #define AARCH64_ARCH_V8_1_FEATURES	(AARCH64_FEATURE_V8_1		\
109 					 | AARCH64_FEATURE_CRC		\
110 					 | AARCH64_FEATURE_LSE		\
111 					 | AARCH64_FEATURE_PAN		\
112 					 | AARCH64_FEATURE_LOR		\
113 					 | AARCH64_FEATURE_RDMA)
114 #define AARCH64_ARCH_V8_2_FEATURES	(AARCH64_FEATURE_V8_2)
115 #define AARCH64_ARCH_V8_3_FEATURES	(AARCH64_FEATURE_V8_3		\
116 					 | AARCH64_FEATURE_PAC		\
117 					 | AARCH64_FEATURE_RCPC		\
118 					 | AARCH64_FEATURE_COMPNUM)
119 #define AARCH64_ARCH_V8_4_FEATURES	(AARCH64_FEATURE_V8_4		\
120 					 | AARCH64_FEATURE_DOTPROD	\
121 					 | AARCH64_FEATURE_FLAGM	\
122 					 | AARCH64_FEATURE_F16_FML)
123 #define AARCH64_ARCH_V8_5_FEATURES	(AARCH64_FEATURE_V8_5		\
124 					 | AARCH64_FEATURE_FLAGMANIP	\
125 					 | AARCH64_FEATURE_FRINTTS	\
126 					 | AARCH64_FEATURE_SB		\
127 					 | AARCH64_FEATURE_PREDRES	\
128 					 | AARCH64_FEATURE_CVADP	\
129 					 | AARCH64_FEATURE_BTI		\
130 					 | AARCH64_FEATURE_SCXTNUM	\
131 					 | AARCH64_FEATURE_ID_PFR2	\
132 					 | AARCH64_FEATURE_SSBS)
133 #define AARCH64_ARCH_V8_6_FEATURES	(AARCH64_FEATURE_V8_6		\
134 					 | AARCH64_FEATURE_BFLOAT16	\
135 					 | AARCH64_FEATURE_I8MM)
136 #define AARCH64_ARCH_V8_7_FEATURES	(AARCH64_FEATURE_V8_7		\
137 					 | AARCH64_FEATURE_LS64)
138 #define AARCH64_ARCH_V8_8_FEATURES	(AARCH64_FEATURE_V8_8		\
139 					 | AARCH64_FEATURE_MOPS		\
140 					 | AARCH64_FEATURE_HBC)
141 
142 #define AARCH64_ARCH_V9_FEATURES	(AARCH64_FEATURE_V9		\
143 					 | AARCH64_FEATURE_F16          \
144 					 | AARCH64_FEATURE_SVE		\
145 					 | AARCH64_FEATURE_SVE2)
146 #define AARCH64_ARCH_V9_1_FEATURES	(AARCH64_ARCH_V8_6_FEATURES)
147 #define AARCH64_ARCH_V9_2_FEATURES	(AARCH64_ARCH_V8_7_FEATURES)
148 #define AARCH64_ARCH_V9_3_FEATURES	(AARCH64_ARCH_V8_8_FEATURES)
149 
150 /* Architectures are the sum of the base and extensions.  */
151 #define AARCH64_ARCH_V8		AARCH64_FEATURE (AARCH64_FEATURE_V8, \
152 						 AARCH64_ARCH_V8_FEATURES)
153 #define AARCH64_ARCH_V8_1	AARCH64_FEATURE (AARCH64_ARCH_V8, \
154 						 AARCH64_ARCH_V8_1_FEATURES)
155 #define AARCH64_ARCH_V8_2	AARCH64_FEATURE (AARCH64_ARCH_V8_1,	\
156 						 AARCH64_ARCH_V8_2_FEATURES)
157 #define AARCH64_ARCH_V8_3	AARCH64_FEATURE (AARCH64_ARCH_V8_2,	\
158 						 AARCH64_ARCH_V8_3_FEATURES)
159 #define AARCH64_ARCH_V8_4	AARCH64_FEATURE (AARCH64_ARCH_V8_3,	\
160 						 AARCH64_ARCH_V8_4_FEATURES)
161 #define AARCH64_ARCH_V8_5	AARCH64_FEATURE (AARCH64_ARCH_V8_4,	\
162 						 AARCH64_ARCH_V8_5_FEATURES)
163 #define AARCH64_ARCH_V8_6	AARCH64_FEATURE (AARCH64_ARCH_V8_5,	\
164 						 AARCH64_ARCH_V8_6_FEATURES)
165 #define AARCH64_ARCH_V8_7	AARCH64_FEATURE (AARCH64_ARCH_V8_6,	\
166 						 AARCH64_ARCH_V8_7_FEATURES)
167 #define AARCH64_ARCH_V8_8	AARCH64_FEATURE (AARCH64_ARCH_V8_7,	\
168 						 AARCH64_ARCH_V8_8_FEATURES)
169 #define AARCH64_ARCH_V8_R	(AARCH64_FEATURE (AARCH64_ARCH_V8_4,	\
170 						 AARCH64_FEATURE_V8_R)	\
171 			      & ~(AARCH64_FEATURE_V8_A | AARCH64_FEATURE_LOR))
172 
173 #define AARCH64_ARCH_V9		AARCH64_FEATURE (AARCH64_ARCH_V8_5,	\
174 						 AARCH64_ARCH_V9_FEATURES)
175 #define AARCH64_ARCH_V9_1	AARCH64_FEATURE (AARCH64_ARCH_V9,	\
176 						 AARCH64_ARCH_V9_1_FEATURES)
177 #define AARCH64_ARCH_V9_2	AARCH64_FEATURE (AARCH64_ARCH_V9_1,	\
178 						 AARCH64_ARCH_V9_2_FEATURES)
179 #define AARCH64_ARCH_V9_3	AARCH64_FEATURE (AARCH64_ARCH_V9_2,	\
180 						 AARCH64_ARCH_V9_3_FEATURES)
181 
182 #define AARCH64_ARCH_NONE	AARCH64_FEATURE (0, 0)
183 #define AARCH64_ANY		AARCH64_FEATURE (-1, 0)	/* Any basic core.  */
184 
185 /* CPU-specific features.  */
186 typedef unsigned long long aarch64_feature_set;
187 
188 #define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)	\
189   ((~(CPU) & (FEAT)) == 0)
190 
191 #define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
192   (((CPU) & (FEAT)) != 0)
193 
194 #define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)	\
195   AARCH64_CPU_HAS_ALL_FEATURES (CPU,FEAT)
196 
197 #define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)	\
198   do						\
199     {						\
200       (TARG) = (F1) | (F2);			\
201     }						\
202   while (0)
203 
204 #define AARCH64_CLEAR_FEATURE(TARG,F1,F2)	\
205   do						\
206     { 						\
207       (TARG) = (F1) &~ (F2);			\
208     }						\
209   while (0)
210 
211 #define AARCH64_FEATURE(core,coproc) ((core) | (coproc))
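
/* Illustrative sketch (not part of the original header): how the feature-set
   macros above are typically combined.  The local names "base" and "wanted"
   are hypothetical and exist only for this example.

     aarch64_feature_set base = AARCH64_ARCH_V8_2;
     aarch64_feature_set wanted;

     AARCH64_MERGE_FEATURE_SETS (wanted, base, AARCH64_FEATURE_SVE);

   After the merge, AARCH64_CPU_HAS_FEATURE (wanted, AARCH64_FEATURE_SVE)
   is true, since every bit of AARCH64_FEATURE_SVE is present in WANTED;
   AARCH64_CLEAR_FEATURE (wanted, wanted, AARCH64_FEATURE_SVE) removes the
   bit again, leaving the plain Armv8.2-A set.  */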
212 
213 enum aarch64_operand_class
214 {
215   AARCH64_OPND_CLASS_NIL,
216   AARCH64_OPND_CLASS_INT_REG,
217   AARCH64_OPND_CLASS_MODIFIED_REG,
218   AARCH64_OPND_CLASS_FP_REG,
219   AARCH64_OPND_CLASS_SIMD_REG,
220   AARCH64_OPND_CLASS_SIMD_ELEMENT,
221   AARCH64_OPND_CLASS_SISD_REG,
222   AARCH64_OPND_CLASS_SIMD_REGLIST,
223   AARCH64_OPND_CLASS_SVE_REG,
224   AARCH64_OPND_CLASS_PRED_REG,
225   AARCH64_OPND_CLASS_ADDRESS,
226   AARCH64_OPND_CLASS_IMMEDIATE,
227   AARCH64_OPND_CLASS_SYSTEM,
228   AARCH64_OPND_CLASS_COND,
229 };
230 
231 /* Operand code that helps both parsing and coding.
232    Keep AARCH64_OPERANDS synced.  */
233 
234 enum aarch64_opnd
235 {
236   AARCH64_OPND_NIL,	/* no operand---MUST BE FIRST!*/
237 
238   AARCH64_OPND_Rd,	/* Integer register as destination.  */
239   AARCH64_OPND_Rn,	/* Integer register as source.  */
240   AARCH64_OPND_Rm,	/* Integer register as source.  */
241   AARCH64_OPND_Rt,	/* Integer register used in ld/st instructions.  */
242   AARCH64_OPND_Rt2,	/* Integer register used in ld/st pair instructions.  */
243   AARCH64_OPND_Rt_LS64,	/* Integer register used in LS64 instructions.  */
244   AARCH64_OPND_Rt_SP,	/* Integer Rt or SP used in STG instructions.  */
245   AARCH64_OPND_Rs,	/* Integer register used in ld/st exclusive.  */
246   AARCH64_OPND_Ra,	/* Integer register used in ddp_3src instructions.  */
247   AARCH64_OPND_Rt_SYS,	/* Integer register used in system instructions.  */
248 
249   AARCH64_OPND_Rd_SP,	/* Integer Rd or SP.  */
250   AARCH64_OPND_Rn_SP,	/* Integer Rn or SP.  */
251   AARCH64_OPND_Rm_SP,	/* Integer Rm or SP.  */
252   AARCH64_OPND_PAIRREG,	/* Paired register operand.  */
253   AARCH64_OPND_Rm_EXT,	/* Integer Rm extended.  */
254   AARCH64_OPND_Rm_SFT,	/* Integer Rm shifted.  */
255 
256   AARCH64_OPND_Fd,	/* Floating-point Fd.  */
257   AARCH64_OPND_Fn,	/* Floating-point Fn.  */
258   AARCH64_OPND_Fm,	/* Floating-point Fm.  */
259   AARCH64_OPND_Fa,	/* Floating-point Fa.  */
260   AARCH64_OPND_Ft,	/* Floating-point Ft.  */
261   AARCH64_OPND_Ft2,	/* Floating-point Ft2.  */
262 
263   AARCH64_OPND_Sd,	/* AdvSIMD Scalar Sd.  */
264   AARCH64_OPND_Sn,	/* AdvSIMD Scalar Sn.  */
265   AARCH64_OPND_Sm,	/* AdvSIMD Scalar Sm.  */
266 
267   AARCH64_OPND_Va,	/* AdvSIMD Vector Va.  */
268   AARCH64_OPND_Vd,	/* AdvSIMD Vector Vd.  */
269   AARCH64_OPND_Vn,	/* AdvSIMD Vector Vn.  */
270   AARCH64_OPND_Vm,	/* AdvSIMD Vector Vm.  */
271   AARCH64_OPND_VdD1,	/* AdvSIMD <Vd>.D[1]; for FMOV only.  */
272   AARCH64_OPND_VnD1,	/* AdvSIMD <Vn>.D[1]; for FMOV only.  */
273   AARCH64_OPND_Ed,	/* AdvSIMD Vector Element Vd.  */
274   AARCH64_OPND_En,	/* AdvSIMD Vector Element Vn.  */
275   AARCH64_OPND_Em,	/* AdvSIMD Vector Element Vm.  */
276   AARCH64_OPND_Em16,	/* AdvSIMD Vector Element Vm restricted to V0 - V15 when
277 			   qualifier is S_H.  */
278   AARCH64_OPND_LVn,	/* AdvSIMD Vector register list used in e.g. TBL.  */
279   AARCH64_OPND_LVt,	/* AdvSIMD Vector register list used in ld/st.  */
280   AARCH64_OPND_LVt_AL,	/* AdvSIMD Vector register list for loading single
281 			   structure to all lanes.  */
282   AARCH64_OPND_LEt,	/* AdvSIMD Vector Element list.  */
283 
284   AARCH64_OPND_CRn,	/* Co-processor register in CRn field.  */
285   AARCH64_OPND_CRm,	/* Co-processor register in CRm field.  */
286 
287   AARCH64_OPND_IDX,	/* AdvSIMD EXT index operand.  */
288   AARCH64_OPND_MASK,	/* AdvSIMD EXT index operand.  */
289   AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
290   AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
291   AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
292   AARCH64_OPND_SIMD_IMM_SFT,	/* AdvSIMD modified immediate with shift.  */
293   AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate.  */
294   AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
295 			   (no encoding).  */
296   AARCH64_OPND_IMM0,	/* Immediate for #0.  */
297   AARCH64_OPND_FPIMM0,	/* Immediate for #0.0.  */
298   AARCH64_OPND_FPIMM,	/* Floating-point Immediate.  */
299   AARCH64_OPND_IMMR,	/* Immediate #<immr> in e.g. BFM.  */
300   AARCH64_OPND_IMMS,	/* Immediate #<imms> in e.g. BFM.  */
301   AARCH64_OPND_WIDTH,	/* Immediate #<width> in e.g. BFI.  */
302   AARCH64_OPND_IMM,	/* Immediate.  */
303   AARCH64_OPND_IMM_2,	/* Immediate.  */
304   AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
305   AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
306   AARCH64_OPND_UIMM4,	/* Unsigned 4-bit immediate in the CRm field.  */
307   AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg.  */
308   AARCH64_OPND_UIMM7,	/* Unsigned 7-bit immediate in the CRm:op2 fields.  */
309   AARCH64_OPND_UIMM10,	/* Unsigned 10-bit immediate in addg/subg.  */
310   AARCH64_OPND_BIT_NUM,	/* Immediate.  */
311   AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
312   AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
313   AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
314   AARCH64_OPND_SIMM5,	/* 5-bit signed immediate in the imm5 field.  */
315   AARCH64_OPND_NZCV,	/* Flag bit specifier giving an alternative value for
316 			   each condition flag.  */
317 
318   AARCH64_OPND_LIMM,	/* Logical Immediate.  */
319   AARCH64_OPND_AIMM,	/* Arithmetic immediate.  */
320   AARCH64_OPND_HALF,	/* #<imm16>{, LSL #<shift>} operand in move wide.  */
321   AARCH64_OPND_FBITS,	/* FP #<fbits> operand in e.g. SCVTF */
322   AARCH64_OPND_IMM_MOV,	/* Immediate operand for the MOV alias.  */
323   AARCH64_OPND_IMM_ROT1,	/* Immediate rotate operand for FCMLA.  */
324   AARCH64_OPND_IMM_ROT2,	/* Immediate rotate operand for indexed FCMLA.  */
325   AARCH64_OPND_IMM_ROT3,	/* Immediate rotate operand for FCADD.  */
326 
327   AARCH64_OPND_COND,	/* Standard condition as the last operand.  */
328   AARCH64_OPND_COND1,	/* Same as the above, but excluding AL and NV.  */
329 
330   AARCH64_OPND_ADDR_ADRP,	/* Memory address for ADRP */
331   AARCH64_OPND_ADDR_PCREL14,	/* 14-bit PC-relative address for e.g. TBZ.  */
332   AARCH64_OPND_ADDR_PCREL19,	/* 19-bit PC-relative address for e.g. LDR.  */
333   AARCH64_OPND_ADDR_PCREL21,	/* 21-bit PC-relative address for e.g. ADR.  */
334   AARCH64_OPND_ADDR_PCREL26,	/* 26-bit PC-relative address for e.g. BL.  */
335 
336   AARCH64_OPND_ADDR_SIMPLE,	/* Address of ld/st exclusive.  */
337   AARCH64_OPND_ADDR_REGOFF,	/* Address of register offset.  */
338   AARCH64_OPND_ADDR_SIMM7,	/* Address of signed 7-bit immediate.  */
339   AARCH64_OPND_ADDR_SIMM9,	/* Address of signed 9-bit immediate.  */
340   AARCH64_OPND_ADDR_SIMM9_2,	/* Same as the above, but the immediate is
341 				   negative or unaligned and there is
342 				   no writeback allowed.  This operand code
343 				   is only used to support the programmer-
344 				   friendly feature of using LDR/STR as the
345 				   mnemonic name for LDUR/STUR instructions
346 				   wherever there is no ambiguity.  */
347   AARCH64_OPND_ADDR_SIMM10,	/* Address of signed 10-bit immediate.  */
348   AARCH64_OPND_ADDR_SIMM11,	/* Address with a signed 11-bit (multiple of
349 				   16) immediate.  */
350   AARCH64_OPND_ADDR_UIMM12,	/* Address of unsigned 12-bit immediate.  */
351   AARCH64_OPND_ADDR_SIMM13,	/* Address with a signed 13-bit (multiple of
352 				   16) immediate.  */
353   AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
354   AARCH64_OPND_ADDR_OFFSET,     /* Address with an optional 9-bit immediate.  */
355   AARCH64_OPND_SIMD_ADDR_POST,	/* Address of ld/st multiple post-indexed.  */
356 
357   AARCH64_OPND_SYSREG,		/* System register operand.  */
358   AARCH64_OPND_PSTATEFIELD,	/* PSTATE field name operand.  */
359   AARCH64_OPND_SYSREG_AT,	/* System register <at_op> operand.  */
360   AARCH64_OPND_SYSREG_DC,	/* System register <dc_op> operand.  */
361   AARCH64_OPND_SYSREG_IC,	/* System register <ic_op> operand.  */
362   AARCH64_OPND_SYSREG_TLBI,	/* System register <tlbi_op> operand.  */
363   AARCH64_OPND_SYSREG_SR,	/* System register RCTX operand.  */
364   AARCH64_OPND_BARRIER,		/* Barrier operand.  */
365   AARCH64_OPND_BARRIER_DSB_NXS,	/* Barrier operand for DSB nXS variant.  */
366   AARCH64_OPND_BARRIER_ISB,	/* Barrier operand for ISB.  */
367   AARCH64_OPND_PRFOP,		/* Prefetch operation.  */
368   AARCH64_OPND_BARRIER_PSB,	/* Barrier operand for PSB.  */
369   AARCH64_OPND_BTI_TARGET,	/* BTI {<target>}.  */
370   AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
371   AARCH64_OPND_SVE_ADDR_RI_S4x32,   /* SVE [<Xn|SP>, #<simm4>*32].  */
372   AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
373   AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
374   AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
375   AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
376   AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
377   AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
378   AARCH64_OPND_SVE_ADDR_RI_U6,	    /* SVE [<Xn|SP>, #<uimm6>].  */
379   AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
380   AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
381   AARCH64_OPND_SVE_ADDR_RI_U6x8,    /* SVE [<Xn|SP>, #<uimm6>*8].  */
382   AARCH64_OPND_SVE_ADDR_R,	    /* SVE [<Xn|SP>].  */
383   AARCH64_OPND_SVE_ADDR_RR,	    /* SVE [<Xn|SP>, <Xm|XZR>].  */
384   AARCH64_OPND_SVE_ADDR_RR_LSL1,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
385   AARCH64_OPND_SVE_ADDR_RR_LSL2,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
386   AARCH64_OPND_SVE_ADDR_RR_LSL3,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
387   AARCH64_OPND_SVE_ADDR_RR_LSL4,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
388   AARCH64_OPND_SVE_ADDR_RX,	    /* SVE [<Xn|SP>, <Xm>].  */
389   AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
390   AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
391   AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
392   AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
393   AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
394   AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
395   AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
396   AARCH64_OPND_SVE_ADDR_RZ_LSL3,    /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
397   AARCH64_OPND_SVE_ADDR_RZ_XTW_14,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
398 				       Bit 14 controls S/U choice.  */
399   AARCH64_OPND_SVE_ADDR_RZ_XTW_22,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
400 				       Bit 22 controls S/U choice.  */
401   AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
402 				       Bit 14 controls S/U choice.  */
403   AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
404 				       Bit 22 controls S/U choice.  */
405   AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
406 				       Bit 14 controls S/U choice.  */
407   AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
408 				       Bit 22 controls S/U choice.  */
409   AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
410 				       Bit 14 controls S/U choice.  */
411   AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
412 				       Bit 22 controls S/U choice.  */
413   AARCH64_OPND_SVE_ADDR_ZI_U5,	    /* SVE [Zn.<T>, #<uimm5>].  */
414   AARCH64_OPND_SVE_ADDR_ZI_U5x2,    /* SVE [Zn.<T>, #<uimm5>*2].  */
415   AARCH64_OPND_SVE_ADDR_ZI_U5x4,    /* SVE [Zn.<T>, #<uimm5>*4].  */
416   AARCH64_OPND_SVE_ADDR_ZI_U5x8,    /* SVE [Zn.<T>, #<uimm5>*8].  */
417   AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>].  */
418   AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>].  */
419   AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>].  */
420   AARCH64_OPND_SVE_AIMM,	/* SVE unsigned arithmetic immediate.  */
421   AARCH64_OPND_SVE_ASIMM,	/* SVE signed arithmetic immediate.  */
422   AARCH64_OPND_SVE_FPIMM8,	/* SVE 8-bit floating-point immediate.  */
423   AARCH64_OPND_SVE_I1_HALF_ONE,	/* SVE choice between 0.5 and 1.0.  */
424   AARCH64_OPND_SVE_I1_HALF_TWO,	/* SVE choice between 0.5 and 2.0.  */
425   AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
426   AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
427   AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
428   AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
429   AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
430   AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
431   AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
432   AARCH64_OPND_SVE_PATTERN,	/* SVE vector pattern enumeration.  */
433   AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
434   AARCH64_OPND_SVE_PRFOP,	/* SVE prefetch operation.  */
435   AARCH64_OPND_SVE_Pd,		/* SVE p0-p15 in Pd.  */
436   AARCH64_OPND_SVE_Pg3,		/* SVE p0-p7 in Pg.  */
437   AARCH64_OPND_SVE_Pg4_5,	/* SVE p0-p15 in Pg, bits [8,5].  */
438   AARCH64_OPND_SVE_Pg4_10,	/* SVE p0-p15 in Pg, bits [13,10].  */
439   AARCH64_OPND_SVE_Pg4_16,	/* SVE p0-p15 in Pg, bits [19,16].  */
440   AARCH64_OPND_SVE_Pm,		/* SVE p0-p15 in Pm.  */
441   AARCH64_OPND_SVE_Pn,		/* SVE p0-p15 in Pn.  */
442   AARCH64_OPND_SVE_Pt,		/* SVE p0-p15 in Pt.  */
443   AARCH64_OPND_SVE_Rm,		/* Integer Rm or ZR, alt. SVE position.  */
444   AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
445   AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
446   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
447   AARCH64_OPND_SVE_SHLIMM_UNPRED_22,	/* SVE 3 bit shift left unpred.  */
448   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
449   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
450   AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
451   AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
452   AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
453   AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
454   AARCH64_OPND_SVE_SIMM8,	/* SVE signed 8-bit immediate.  */
455   AARCH64_OPND_SVE_UIMM3,	/* SVE unsigned 3-bit immediate.  */
456   AARCH64_OPND_SVE_UIMM7,	/* SVE unsigned 7-bit immediate.  */
457   AARCH64_OPND_SVE_UIMM8,	/* SVE unsigned 8-bit immediate.  */
458   AARCH64_OPND_SVE_UIMM8_53,	/* SVE split unsigned 8-bit immediate.  */
459   AARCH64_OPND_SVE_VZn,		/* Scalar SIMD&FP register in Zn field.  */
460   AARCH64_OPND_SVE_Vd,		/* Scalar SIMD&FP register in Vd.  */
461   AARCH64_OPND_SVE_Vm,		/* Scalar SIMD&FP register in Vm.  */
462   AARCH64_OPND_SVE_Vn,		/* Scalar SIMD&FP register in Vn.  */
463   AARCH64_OPND_SVE_Za_5,	/* SVE vector register in Za, bits [9,5].  */
464   AARCH64_OPND_SVE_Za_16,	/* SVE vector register in Za, bits [20,16].  */
465   AARCH64_OPND_SVE_Zd,		/* SVE vector register in Zd.  */
466   AARCH64_OPND_SVE_Zm_5,	/* SVE vector register in Zm, bits [9,5].  */
467   AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
468   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
469   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
470   AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
471   AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
472   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
473   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
474   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
475   AARCH64_OPND_SVE_ZnxN,	/* SVE vector register list in Zn.  */
476   AARCH64_OPND_SVE_Zt,		/* SVE vector register in Zt.  */
477   AARCH64_OPND_SVE_ZtxN,	/* SVE vector register list in Zt.  */
478   AARCH64_OPND_SME_ZAda_2b,	/* SME <ZAda>.S, 2-bits.  */
479   AARCH64_OPND_SME_ZAda_3b,	/* SME <ZAda>.D, 3-bits.  */
480   AARCH64_OPND_SME_ZA_HV_idx_src,	/* SME source ZA tile vector.  */
481   AARCH64_OPND_SME_ZA_HV_idx_dest,	/* SME destination ZA tile vector.  */
482   AARCH64_OPND_SME_Pm,		/* SME scalable predicate register, bits [15:13].  */
483   AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
484   AARCH64_OPND_SME_ZA_HV_idx_ldstr,	/* SME destination ZA tile vector.  */
485   AARCH64_OPND_SME_ZA_array,        /* SME ZA[<Wv>{, #<imm>}].  */
486   AARCH64_OPND_SME_ADDR_RI_U4xVL,   /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
487   AARCH64_OPND_SME_SM_ZA,           /* SME {SM | ZA}.  */
488   AARCH64_OPND_SME_PnT_Wm_imm,           /* SME <Pn>.<T>[<Wm>, #<imm>].  */
489   AARCH64_OPND_TME_UIMM16,	/* TME unsigned 16-bit immediate.  */
490   AARCH64_OPND_SM3_IMM2,	/* SM3 encodes lane in bits [13, 14].  */
491   AARCH64_OPND_MOPS_ADDR_Rd,	/* [Rd]!, in bits [0, 4].  */
492   AARCH64_OPND_MOPS_ADDR_Rs,	/* [Rs]!, in bits [16, 20].  */
493   AARCH64_OPND_MOPS_WB_Rn	/* Rn!, in bits [5, 9].  */
494 };
495 
496 /* Qualifier constrains an operand.  It either specifies a variant of an
497    operand type or limits values available to an operand type.
498 
499    N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */
500 
501 enum aarch64_opnd_qualifier
502 {
503   /* Indicating no further qualification on an operand.  */
504   AARCH64_OPND_QLF_NIL,
505 
506   /* Qualifying an operand which is a general purpose (integer) register;
507      indicating the operand data size or a specific register.  */
508   AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
509   AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
510   AARCH64_OPND_QLF_WSP,	/* WSP.  */
511   AARCH64_OPND_QLF_SP,	/* SP.  */
512 
513   /* Qualifying an operand which is a floating-point register, a SIMD
514      vector element or a SIMD vector element list; indicating operand data
515      size or the size of each SIMD vector element in the case of a SIMD
516      vector element list.
517      These qualifiers are also used to qualify an address operand to
518      indicate the size of data element a load/store instruction is
519      accessing.
520      They are also used for the immediate shift operand in e.g. SSHR.  Such
521      a use is only for the ease of operand encoding/decoding and qualifier
522      sequence matching; such a use should not be applied widely; use the value
523      constraint qualifiers for immediate operands wherever possible.  */
524   AARCH64_OPND_QLF_S_B,
525   AARCH64_OPND_QLF_S_H,
526   AARCH64_OPND_QLF_S_S,
527   AARCH64_OPND_QLF_S_D,
528   AARCH64_OPND_QLF_S_Q,
529   /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
530      or 2 x 2 byte are selected by the instruction.  Other than that they have
531      no difference from AARCH64_OPND_QLF_S_B in encoding.  They are here purely
532      for syntactical reasons and are an exception to the normal AArch64
533      disassembly scheme.  */
534   AARCH64_OPND_QLF_S_4B,
535   AARCH64_OPND_QLF_S_2H,
536 
537   /* Qualifying an operand which is a SIMD vector register or a SIMD vector
538      register list; indicating register shape.
539      They are also used for the immediate shift operand in e.g. SSHR.  Such
540      a use is only for the ease of operand encoding/decoding and qualifier
541      sequence matching; such a use should not be applied widely; use the value
542      constraint qualifiers for immediate operands wherever possible.  */
543   AARCH64_OPND_QLF_V_4B,
544   AARCH64_OPND_QLF_V_8B,
545   AARCH64_OPND_QLF_V_16B,
546   AARCH64_OPND_QLF_V_2H,
547   AARCH64_OPND_QLF_V_4H,
548   AARCH64_OPND_QLF_V_8H,
549   AARCH64_OPND_QLF_V_2S,
550   AARCH64_OPND_QLF_V_4S,
551   AARCH64_OPND_QLF_V_1D,
552   AARCH64_OPND_QLF_V_2D,
553   AARCH64_OPND_QLF_V_1Q,
554 
555   AARCH64_OPND_QLF_P_Z,
556   AARCH64_OPND_QLF_P_M,
557 
558   /* Used for signed immediates that are scaled by a Tag granule,
559      as in stg, st2g, etc.  */
560   AARCH64_OPND_QLF_imm_tag,
561 
562   /* Constraint on value.  */
563   AARCH64_OPND_QLF_CR,		/* CRn, CRm. */
564   AARCH64_OPND_QLF_imm_0_7,
565   AARCH64_OPND_QLF_imm_0_15,
566   AARCH64_OPND_QLF_imm_0_31,
567   AARCH64_OPND_QLF_imm_0_63,
568   AARCH64_OPND_QLF_imm_1_32,
569   AARCH64_OPND_QLF_imm_1_64,
570 
571   /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
572      or shift-ones.  */
573   AARCH64_OPND_QLF_LSL,
574   AARCH64_OPND_QLF_MSL,
575 
576   /* Special qualifier helping retrieve qualifier information during the
577      decoding time (currently not in use).  */
578   AARCH64_OPND_QLF_RETRIEVE,
579 };
580 
581 /* Instruction class.  */
582 
583 enum aarch64_insn_class
584 {
585   aarch64_misc,
586   addsub_carry,
587   addsub_ext,
588   addsub_imm,
589   addsub_shift,
590   asimdall,
591   asimddiff,
592   asimdelem,
593   asimdext,
594   asimdimm,
595   asimdins,
596   asimdmisc,
597   asimdperm,
598   asimdsame,
599   asimdshf,
600   asimdtbl,
601   asisddiff,
602   asisdelem,
603   asisdlse,
604   asisdlsep,
605   asisdlso,
606   asisdlsop,
607   asisdmisc,
608   asisdone,
609   asisdpair,
610   asisdsame,
611   asisdshf,
612   bitfield,
613   branch_imm,
614   branch_reg,
615   compbranch,
616   condbranch,
617   condcmp_imm,
618   condcmp_reg,
619   condsel,
620   cryptoaes,
621   cryptosha2,
622   cryptosha3,
623   dp_1src,
624   dp_2src,
625   dp_3src,
626   exception,
627   extract,
628   float2fix,
629   float2int,
630   floatccmp,
631   floatcmp,
632   floatdp1,
633   floatdp2,
634   floatdp3,
635   floatimm,
636   floatsel,
637   ldst_immpost,
638   ldst_immpre,
639   ldst_imm9,	/* immpost or immpre */
640   ldst_imm10,	/* LDRAA/LDRAB */
641   ldst_pos,
642   ldst_regoff,
643   ldst_unpriv,
644   ldst_unscaled,
645   ldstexcl,
646   ldstnapair_offs,
647   ldstpair_off,
648   ldstpair_indexed,
649   loadlit,
650   log_imm,
651   log_shift,
652   lse_atomic,
653   movewide,
654   pcreladdr,
655   ic_system,
656   sme_misc,
657   sme_ldr,
658   sme_str,
659   sme_start,
660   sme_stop,
661   sve_cpy,
662   sve_index,
663   sve_limm,
664   sve_misc,
665   sve_movprfx,
666   sve_pred_zm,
667   sve_shift_pred,
668   sve_shift_unpred,
669   sve_size_bhs,
670   sve_size_bhsd,
671   sve_size_hsd,
672   sve_size_hsd2,
673   sve_size_sd,
674   sve_size_bh,
675   sve_size_sd2,
676   sve_size_13,
677   sve_shift_tsz_hsd,
678   sve_shift_tsz_bhsd,
679   sve_size_tsz_bhs,
680   testbranch,
681   cryptosm3,
682   cryptosm4,
683   dotproduct,
684   bfloat16,
685 };
686 
687 /* Opcode enumerators.  */
688 
689 enum aarch64_op
690 {
691   OP_NIL,
692   OP_STRB_POS,
693   OP_LDRB_POS,
694   OP_LDRSB_POS,
695   OP_STRH_POS,
696   OP_LDRH_POS,
697   OP_LDRSH_POS,
698   OP_STR_POS,
699   OP_LDR_POS,
700   OP_STRF_POS,
701   OP_LDRF_POS,
702   OP_LDRSW_POS,
703   OP_PRFM_POS,
704 
705   OP_STURB,
706   OP_LDURB,
707   OP_LDURSB,
708   OP_STURH,
709   OP_LDURH,
710   OP_LDURSH,
711   OP_STUR,
712   OP_LDUR,
713   OP_STURV,
714   OP_LDURV,
715   OP_LDURSW,
716   OP_PRFUM,
717 
718   OP_LDR_LIT,
719   OP_LDRV_LIT,
720   OP_LDRSW_LIT,
721   OP_PRFM_LIT,
722 
723   OP_ADD,
724   OP_B,
725   OP_BL,
726 
727   OP_MOVN,
728   OP_MOVZ,
729   OP_MOVK,
730 
731   OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
732   OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
733   OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */
734 
735   OP_MOV_V,		/* MOV alias for moving vector register.  */
736 
737   OP_ASR_IMM,
738   OP_LSR_IMM,
739   OP_LSL_IMM,
740 
741   OP_BIC,
742 
743   OP_UBFX,
744   OP_BFXIL,
745   OP_SBFX,
746   OP_SBFIZ,
747   OP_BFI,
748   OP_BFC,		/* ARMv8.2.  */
749   OP_UBFIZ,
750   OP_UXTB,
751   OP_UXTH,
752   OP_UXTW,
753 
754   OP_CINC,
755   OP_CINV,
756   OP_CNEG,
757   OP_CSET,
758   OP_CSETM,
759 
760   OP_FCVT,
761   OP_FCVTN,
762   OP_FCVTN2,
763   OP_FCVTL,
764   OP_FCVTL2,
765   OP_FCVTXN_S,		/* Scalar version.  */
766 
767   OP_ROR_IMM,
768 
769   OP_SXTL,
770   OP_SXTL2,
771   OP_UXTL,
772   OP_UXTL2,
773 
774   OP_MOV_P_P,
775   OP_MOV_Z_P_Z,
776   OP_MOV_Z_V,
777   OP_MOV_Z_Z,
778   OP_MOV_Z_Zi,
779   OP_MOVM_P_P_P,
780   OP_MOVS_P_P,
781   OP_MOVZS_P_P_P,
782   OP_MOVZ_P_P_P,
783   OP_NOTS_P_P_P_Z,
784   OP_NOT_P_P_P_Z,
785 
786   OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */
787 
788   OP_TOTAL_NUM,		/* Pseudo.  */
789 };
790 
791 /* Error types.  */
792 enum err_type
793 {
794   ERR_OK,
795   ERR_UND,
796   ERR_UNP,
797   ERR_NYI,
798   ERR_VFI,
799   ERR_NR_ENTRIES
800 };
801 
802 /* Maximum number of operands an instruction can have.  */
803 #define AARCH64_MAX_OPND_NUM 6
804 /* Maximum number of qualifier sequences an instruction can have.  */
805 #define AARCH64_MAX_QLF_SEQ_NUM 10
806 /* Operand qualifier typedef; optimized for the size.  */
807 typedef unsigned char aarch64_opnd_qualifier_t;
808 /* Operand qualifier sequence typedef.  */
809 typedef aarch64_opnd_qualifier_t	\
810 	  aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
811 
812 /* FIXME: improve the efficiency.  */
813 static inline bool
814 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
815 {
816   int i;
817   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
818     if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
819       return false;
820   return true;
821 }
822 
823 /*  Forward declare error reporting type.  */
824 typedef struct aarch64_operand_error aarch64_operand_error;
825 /* Forward declare instruction sequence type.  */
826 typedef struct aarch64_instr_sequence aarch64_instr_sequence;
827 /* Forward declare instruction definition.  */
828 typedef struct aarch64_inst aarch64_inst;
829 
830 /* This structure holds information for a particular opcode.  */
831 
832 struct aarch64_opcode
833 {
834   /* The name of the mnemonic.  */
835   const char *name;
836 
837   /* The opcode itself.  Those bits which will be filled in with
838      operands are zeroes.  */
839   aarch64_insn opcode;
840 
841   /* The opcode mask.  This is used by the disassembler.  This is a
842      mask containing ones indicating those bits which must match the
843      opcode field, and zeroes indicating those bits which need not
844      match (and are presumably filled in by operands).  */
845   aarch64_insn mask;
846 
847   /* Instruction class.  */
848   enum aarch64_insn_class iclass;
849 
850   /* Enumerator identifier.  */
851   enum aarch64_op op;
852 
853   /* Which architecture variant provides this instruction.  */
854   const aarch64_feature_set *avariant;
855 
856   /* An array of operand codes.  Each code is an index into the
857      operand table.  They appear in the order which the operands must
858      appear in assembly code, and are terminated by a zero.  */
859   enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];
860 
861   /* A list of operand qualifier code sequence.  Each operand qualifier
862      code qualifies the corresponding operand code.  Each operand
863      qualifier sequence specifies a valid opcode variant and related
864      constraint on operands.  */
865   aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];
866 
867   /* Flags providing information about this instruction.  */
868   uint64_t flags;
869 
870   /* Extra constraints on the instruction that the verifier checks.  */
871   uint32_t constraints;
872 
873   /* If nonzero, this operand and operand 0 are both registers and
874      are required to have the same register number.  */
875   unsigned char tied_operand;
876 
877   /* If non-NULL, a function to verify that a given instruction is valid.  */
878   enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
879 			      bfd_vma, bool, aarch64_operand_error *,
880 			      struct aarch64_instr_sequence *);
881 };
882 
883 typedef struct aarch64_opcode aarch64_opcode;
884 
885 /* Table describing all the AArch64 opcodes.  */
886 extern const aarch64_opcode aarch64_opcode_table[];
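
/* Illustrative sketch (an assumption, not documented here): looking up a
   mnemonic by scanning aarch64_opcode_table, assuming the table is
   terminated by an entry whose NAME field is NULL (as in the binutils
   aarch64-tbl.h sources).  strcmp is from <string.h>.

     const aarch64_opcode *ent;
     for (ent = aarch64_opcode_table; ent->name != NULL; ent++)
       if (strcmp (ent->name, "add") == 0)
	 break;

   On exit, ENT points at the first "add" entry, or at the sentinel if the
   mnemonic is not found.  */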
887 
888 /* Opcode flags.  */
889 #define F_ALIAS (1 << 0)
890 #define F_HAS_ALIAS (1 << 1)
891 /* Disassembly preference priority 1-3 (the larger, the higher).  If nothing
892    is specified, the priority defaults to 0, i.e. the lowest priority.  */
893 #define F_P1 (1 << 2)
894 #define F_P2 (2 << 2)
895 #define F_P3 (3 << 2)
896 /* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
897 #define F_COND (1 << 4)
898 /* Instruction has the field of 'sf'.  */
899 #define F_SF (1 << 5)
900 /* Instruction has the field of 'size:Q'.  */
901 #define F_SIZEQ (1 << 6)
902 /* Floating-point instruction has the field of 'type'.  */
903 #define F_FPTYPE (1 << 7)
904 /* AdvSIMD scalar instruction has the field of 'size'.  */
905 #define F_SSIZE (1 << 8)
906 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
907 #define F_T (1 << 9)
908 /* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
909 #define F_GPRSIZE_IN_Q (1 << 10)
910 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
911 #define F_LDS_SIZE (1 << 11)
912 /* Optional operand; assume at most one operand can be optional.  */
913 #define F_OPD0_OPT (1 << 12)
914 #define F_OPD1_OPT (2 << 12)
915 #define F_OPD2_OPT (3 << 12)
916 #define F_OPD3_OPT (4 << 12)
917 #define F_OPD4_OPT (5 << 12)
918 /* Default value for the optional operand when omitted from the assembly.  */
919 #define F_DEFAULT(X) (((X) & 0x1f) << 15)
920 /* An instruction that is an alias of another instruction needs to be
921    encoded/decoded by converting it to/from the real form, followed by
922    encoding/decoding according to the rules of the real opcode.
923    This contrasts with coding directly from the alias's own information.
924    N.B. this flag must be used together with F_ALIAS.  */
925 #define F_CONV (1 << 20)
926 /* Use together with F_ALIAS to indicate an alias opcode is a programmer
927    friendly pseudo instruction available only in the assembly code (thus will
928    not show up in the disassembly).  */
929 #define F_PSEUDO (1 << 21)
930 /* Instruction has miscellaneous encoding/decoding rules.  */
931 #define F_MISC (1 << 22)
932 /* Instruction has the field of 'N'; used in conjunction with F_SF.  */
933 #define F_N (1 << 23)
934 /* Opcode dependent field.  */
935 #define F_OD(X) (((X) & 0x7) << 24)
936 /* Instruction has the field of 'sz'.  */
937 #define F_LSE_SZ (1 << 27)
938 /* Require an exact qualifier match, even for NIL qualifiers.  */
939 #define F_STRICT (1ULL << 28)
940 /* This system instruction is used to read system registers.  */
941 #define F_SYS_READ (1ULL << 29)
942 /* This system instruction is used to write system registers.  */
943 #define F_SYS_WRITE (1ULL << 30)
944 /* This instruction has an extra constraint on it that imposes a requirement on
945    subsequent instructions.  */
946 #define F_SCAN (1ULL << 31)
947 /* Next bit is 32.  */
948 
949 /* Instruction constraints.  */
950 /* This instruction has a predication constraint on the instruction at PC+4.  */
951 #define C_SCAN_MOVPRFX (1U << 0)
952 /* This instruction's operation width is determined by the operand with the
953    largest element size.  */
954 #define C_MAX_ELEM (1U << 1)
955 #define C_SCAN_MOPS_P (1U << 2)
956 #define C_SCAN_MOPS_M (2U << 2)
957 #define C_SCAN_MOPS_E (3U << 2)
958 #define C_SCAN_MOPS_PME (3U << 2)
959 /* Next bit is 4.  */
960 
961 static inline bool
962 alias_opcode_p (const aarch64_opcode *opcode)
963 {
964   return (opcode->flags & F_ALIAS) != 0;
965 }
966 
967 static inline bool
968 opcode_has_alias (const aarch64_opcode *opcode)
969 {
970   return (opcode->flags & F_HAS_ALIAS) != 0;
971 }
972 
973 /* Priority for disassembling preference.  */
974 static inline int
975 opcode_priority (const aarch64_opcode *opcode)
976 {
977   return (opcode->flags >> 2) & 0x3;
978 }
979 
980 static inline bool
981 pseudo_opcode_p (const aarch64_opcode *opcode)
982 {
983   return (opcode->flags & F_PSEUDO) != 0lu;
984 }
985 
986 static inline bool
987 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
988 {
989   return ((opcode->flags >> 12) & 0x7) == idx + 1;
990 }
991 
992 static inline aarch64_insn
993 get_optional_operand_default_value (const aarch64_opcode *opcode)
994 {
995   return (opcode->flags >> 15) & 0x1f;
996 }
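
/* Illustrative sketch (hypothetical flag values, not a real table entry):
   how the two helpers above decode the optional-operand bits.  Suppose an
   opcode entry OPC has F_OPD1_OPT | F_DEFAULT (0x1f) set in its flags,
   meaning operand 1 may be omitted and defaults to 0x1f.  Then:

     optional_operand_p (OPC, 1)                  -> true
     optional_operand_p (OPC, 0)                  -> false
     get_optional_operand_default_value (OPC)     -> 0x1f  */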
997 
998 static inline unsigned int
999 get_opcode_dependent_value (const aarch64_opcode *opcode)
1000 {
1001   return (opcode->flags >> 24) & 0x7;
1002 }
1003 
1004 static inline bool
1005 opcode_has_special_coder (const aarch64_opcode *opcode)
1006 {
1007   return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1008 	  | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND)) != 0;
1009 }
1010 
1011 struct aarch64_name_value_pair
1012 {
1013   const char *  name;
1014   aarch64_insn	value;
1015 };
1016 
1017 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
1018 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
1019 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
1020 extern const struct aarch64_name_value_pair aarch64_prfops [32];
1021 extern const struct aarch64_name_value_pair aarch64_hint_options [];
1022 
1023 #define AARCH64_MAX_SYSREG_NAME_LEN 32
1024 
1025 typedef struct
1026 {
1027   const char *  name;
1028   aarch64_insn	value;
1029   uint32_t	flags;
1030 
1031   /* A set of features, all of which are required for this system register to be
1032      available.  */
1033   aarch64_feature_set features;
1034 } aarch64_sys_reg;
1035 
1036 extern const aarch64_sys_reg aarch64_sys_regs [];
1037 extern const aarch64_sys_reg aarch64_pstatefields [];
1038 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
1039 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
1040 					     const aarch64_sys_reg *);
1041 
1042 typedef struct
1043 {
1044   const char *name;
1045   uint32_t value;
1046   uint32_t flags;
1047 } aarch64_sys_ins_reg;
1048 
1049 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
1050 extern bool
1051 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
1052 				 const char *reg_name, aarch64_insn,
1053                                  uint32_t, aarch64_feature_set);
1054 
1055 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
1056 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
1057 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
1058 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
1059 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1060 
1061 /* Shift/extending operator kinds.
1062    N.B. order is important; keep aarch64_operand_modifiers synced.  */
1063 enum aarch64_modifier_kind
1064 {
1065   AARCH64_MOD_NONE,
1066   AARCH64_MOD_MSL,
1067   AARCH64_MOD_ROR,
1068   AARCH64_MOD_ASR,
1069   AARCH64_MOD_LSR,
1070   AARCH64_MOD_LSL,
1071   AARCH64_MOD_UXTB,
1072   AARCH64_MOD_UXTH,
1073   AARCH64_MOD_UXTW,
1074   AARCH64_MOD_UXTX,
1075   AARCH64_MOD_SXTB,
1076   AARCH64_MOD_SXTH,
1077   AARCH64_MOD_SXTW,
1078   AARCH64_MOD_SXTX,
1079   AARCH64_MOD_MUL,
1080   AARCH64_MOD_MUL_VL,
1081 };
1082 
1083 bool
1084 aarch64_extend_operator_p (enum aarch64_modifier_kind);
1085 
1086 enum aarch64_modifier_kind
1087 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
1088 /* Condition.  */
1089 
1090 typedef struct
1091 {
1092   /* A list of names with the first one as the disassembly preference;
1093      terminated by NULL if fewer than 3.  */
1094   const char *names[4];
1095   aarch64_insn value;
1096 } aarch64_cond;
1097 
1098 extern const aarch64_cond aarch64_conds[16];
1099 
1100 const aarch64_cond* get_cond_from_value (aarch64_insn value);
1101 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1102 
1103 /* Structure representing an operand.  */
1104 
1105 struct aarch64_opnd_info
1106 {
1107   enum aarch64_opnd type;
1108   aarch64_opnd_qualifier_t qualifier;
1109   int idx;
1110 
1111   union
1112     {
1113       struct
1114 	{
1115 	  unsigned regno;
1116 	} reg;
1117       struct
1118 	{
1119 	  unsigned int regno;
1120 	  int64_t index;
1121 	} reglane;
1122       /* e.g. LVn.  */
1123       struct
1124 	{
1125 	  unsigned first_regno : 5;
1126 	  unsigned num_regs : 3;
1127 	  /* 1 if it is a list of register elements.  */
1128 	  unsigned has_index : 1;
1129 	  /* Lane index; valid only when has_index is 1.  */
1130 	  int64_t index;
1131 	} reglist;
1132       /* e.g. immediate or pc relative address offset.  */
1133       struct
1134 	{
1135 	  int64_t value;
1136 	  unsigned is_fp : 1;
1137 	} imm;
1138       /* e.g. address in STR (register offset).  */
1139       struct
1140 	{
1141 	  unsigned base_regno;
1142 	  struct
1143 	    {
1144 	      union
1145 		{
1146 		  int imm;
1147 		  unsigned regno;
1148 		};
1149 	      unsigned is_reg;
1150 	    } offset;
1151 	  unsigned pcrel : 1;		/* PC-relative.  */
1152 	  unsigned writeback : 1;
1153 	  unsigned preind : 1;		/* Pre-indexed.  */
1154 	  unsigned postind : 1;		/* Post-indexed.  */
1155 	} addr;
1156 
1157       struct
1158 	{
1159 	  /* The encoding of the system register.  */
1160 	  aarch64_insn value;
1161 
1162 	  /* The system register flags.  */
1163 	  uint32_t flags;
1164 	} sysreg;
1165 
1166       /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
1167       struct
1168 	{
1169 	  int regno;      /* <ZAn> */
1170 	  struct
1171 	  {
1172 	    int regno;    /* <Wv>  */
1173 	    int imm;      /* <imm>  */
1174 	  } index;
1175 	  unsigned v : 1;	/* <HV> horizontal or vertical vector indicator.  */
1176 	} za_tile_vector;
1177 
1178       const aarch64_cond *cond;
1179       /* The encoding of the PSTATE field.  */
1180       aarch64_insn pstatefield;
1181       const aarch64_sys_ins_reg *sysins_op;
1182       const struct aarch64_name_value_pair *barrier;
1183       const struct aarch64_name_value_pair *hint_option;
1184       const struct aarch64_name_value_pair *prfop;
1185     };
1186 
1187   /* Operand shifter; in use when the operand is a register offset address,
1188      add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
1189   struct
1190     {
1191       enum aarch64_modifier_kind kind;
1192       unsigned operator_present: 1;	/* Only valid during encoding.  */
1193       /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
1194       unsigned amount_present: 1;
1195       int64_t amount;
1196     } shifter;
1197 
1198   unsigned skip:1;	/* The operand is not complete if a fixup still needs
1199 			   to be done on it.  In some (but not all) of these
1200 			   cases, we need to tell libopcodes to skip the
1201 			   constraint checking and the encoding for this
1202 			   operand, so that libopcodes can pick up the
1203 			   right opcode before the operand is fixed up.  This
1204 			   flag should only be used during
1205 			   assembling/encoding.  */
1206   unsigned present:1;	/* Whether this operand is present in the assembly
1207 			   line; not used during the disassembly.  */
1208 };
1209 
1210 typedef struct aarch64_opnd_info aarch64_opnd_info;
1211 
1212 /* Structure representing an instruction.
1213 
1214    It is used during both assembling and disassembling.  The assembler
1215    fills an aarch64_inst after a successful parse and then passes it to the
1216    encoding routine to do the encoding.  During disassembly, the
1217    disassembler calls the decoding routine to decode a binary instruction; on a
1218    successful return, such a structure will be filled with information about the
1219    instruction; the disassembler then uses that information to print out the
1220    instruction.  */
1221 
1222 struct aarch64_inst
1223 {
1224   /* The value of the binary instruction.  */
1225   aarch64_insn value;
1226 
1227   /* Corresponding opcode entry.  */
1228   const aarch64_opcode *opcode;
1229 
1230   /* Condition for a truly conditionally-executed instruction, e.g. b.cond.  */
1231   const aarch64_cond *cond;
1232 
1233   /* Operands information.  */
1234   aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
1235 };
1236 
1237 /* Defining the HINT #imm values for the aarch64_hint_options.  */
1238 #define HINT_OPD_CSYNC	0x11
1239 #define HINT_OPD_C	0x22
1240 #define HINT_OPD_J	0x24
1241 #define HINT_OPD_JC	0x26
1242 #define HINT_OPD_NULL	0x00
1243 
1244 
1245 /* Diagnosis-related declarations and interfaces.  */
1246 
1247 /* Operand error kind enumerators.
1248 
1249    AARCH64_OPDE_RECOVERABLE
1250      Less severe error found during parsing, very possibly because GAS has
1251      picked up a wrong instruction template.
1252 
1253    AARCH64_OPDE_A_SHOULD_FOLLOW_B
1254      The instruction forms (or is expected to form) part of a sequence,
1255      but the preceding instruction in the sequence wasn't the expected one.
1256      The message refers to two strings: the name of the current instruction,
1257      followed by the name of the expected preceding instruction.
1258 
1259    AARCH64_OPDE_EXPECTED_A_AFTER_B
1260      Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1261      so that the current instruction is assumed to be the incorrect one:
1262      "since the previous instruction was B, the current one should be A".
1263 
1264    AARCH64_OPDE_SYNTAX_ERROR
1265      General syntax error; it can be either a user error, or simply because
1266      GAS is trying a wrong instruction template.
1267 
1268    AARCH64_OPDE_FATAL_SYNTAX_ERROR
1269      Definitely a user syntax error.
1270 
1271    AARCH64_OPDE_INVALID_VARIANT
1272      No syntax error, but the operands are not a valid combination, e.g.
1273      FMOV D0,S0
1274 
1275    AARCH64_OPDE_UNTIED_IMMS
1276      The asm failed to use the same immediate for a destination operand
1277      and a tied source operand.
1278 
1279    AARCH64_OPDE_UNTIED_OPERAND
1280      The asm failed to use the same register for a destination operand
1281      and a tied source operand.
1282 
1283    AARCH64_OPDE_OUT_OF_RANGE
1284      Error about some immediate value out of a valid range.
1285 
1286    AARCH64_OPDE_UNALIGNED
1287      Error about some immediate value not properly aligned (i.e. not being a
1288      multiple of a certain value).
1289 
1290    AARCH64_OPDE_REG_LIST
1291      Error about the register list operand having an unexpected number of
1292      registers.
1293 
1294    AARCH64_OPDE_OTHER_ERROR
1295      Error of the highest severity and used for any severe issue that does not
1296      fall into any of the above categories.
1297 
1298    AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
1299    AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS, while the
1300    AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
1301    only libopcodes has the information about the valid variants of each
1302    instruction.
1303 
1304    The enumerators have an increasing severity.  This is helpful when there are
1305    multiple instruction templates available for a given mnemonic name (e.g.
1306    FMOV); this mechanism will help choose the most suitable template from which
1307    the generated diagnostics can most closely describe the issues, if any.  */
1308 
1309 enum aarch64_operand_error_kind
1310 {
1311   AARCH64_OPDE_NIL,
1312   AARCH64_OPDE_RECOVERABLE,
1313   AARCH64_OPDE_A_SHOULD_FOLLOW_B,
1314   AARCH64_OPDE_EXPECTED_A_AFTER_B,
1315   AARCH64_OPDE_SYNTAX_ERROR,
1316   AARCH64_OPDE_FATAL_SYNTAX_ERROR,
1317   AARCH64_OPDE_INVALID_VARIANT,
1318   AARCH64_OPDE_UNTIED_IMMS,
1319   AARCH64_OPDE_UNTIED_OPERAND,
1320   AARCH64_OPDE_OUT_OF_RANGE,
1321   AARCH64_OPDE_UNALIGNED,
1322   AARCH64_OPDE_REG_LIST,
1323   AARCH64_OPDE_OTHER_ERROR
1324 };
1325 
1326 /* N.B. GAS assumes that this structure works well with shallow copy.  */
1327 struct aarch64_operand_error
1328 {
1329   enum aarch64_operand_error_kind kind;
1330   int index;
1331   const char *error;
1332   /* Some data for extra information.  */
1333   union {
1334     int i;
1335     const char *s;
1336   } data[3];
1337   bool non_fatal;
1338 };
1339 
1340 /* AArch64 sequence structure used to track instructions with F_SCAN
1341    dependencies for both assembler and disassembler.  */
1342 struct aarch64_instr_sequence
1343 {
1344   /* The instructions in the sequence, starting with the one that
1345      caused it to be opened.  */
1346   aarch64_inst *instr;
1347   /* The number of instructions already in the sequence.  */
1348   int num_added_insns;
1349   /* The number of instructions allocated to the sequence.  */
1350   int num_allocated_insns;
1351 };
1352 
1353 /* Encoding entrypoint.  */
1354 
1355 extern bool
1356 aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
1357 		       aarch64_insn *, aarch64_opnd_qualifier_t *,
1358 		       aarch64_operand_error *, aarch64_instr_sequence *);
1359 
1360 extern const aarch64_opcode *
1361 aarch64_replace_opcode (struct aarch64_inst *,
1362 			const aarch64_opcode *);
1363 
1364 /* Given the opcode enumerator OP, return the pointer to the corresponding
1365    opcode entry.  */
1366 
1367 extern const aarch64_opcode *
1368 aarch64_get_opcode (enum aarch64_op);
1369 
1370 /* Generate the string representation of an operand.  */
1371 extern void
1372 aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
1373 		       const aarch64_opnd_info *, int, int *, bfd_vma *,
1374 		       char **, char *, size_t,
1375 		       aarch64_feature_set features);
1376 
1377 /* Miscellaneous interface.  */
1378 
1379 extern int
1380 aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);
1381 
1382 extern aarch64_opnd_qualifier_t
1383 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
1384 				const aarch64_opnd_qualifier_t, int);
1385 
1386 extern bool
1387 aarch64_is_destructive_by_operands (const aarch64_opcode *);
1388 
1389 extern int
1390 aarch64_num_of_operands (const aarch64_opcode *);
1391 
1392 extern int
1393 aarch64_stack_pointer_p (const aarch64_opnd_info *);
1394 
1395 extern int
1396 aarch64_zero_register_p (const aarch64_opnd_info *);
1397 
1398 extern enum err_type
1399 aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
1400 		     aarch64_operand_error *);
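
/* Illustrative sketch (the argument meanings are assumptions, not stated in
   this header): decoding a single instruction word.  0xd503201f is the
   encoding of NOP; the third argument is assumed to suppress alias forms
   when true, and memset is from <string.h>.

     aarch64_inst inst;
     aarch64_operand_error err;
     memset (&inst, 0, sizeof inst);
     if (aarch64_decode_insn (0xd503201f, &inst, false, &err) == ERR_OK)
       use (inst.opcode->name);

   On success, inst.opcode points at the matching aarch64_opcode_table entry
   and inst.operands[] holds the decoded operands; "use" is a placeholder.  */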
1401 
1402 extern void
1403 init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);
1404 
1405 /* Given an operand qualifier, return the expected data element size
1406    of a qualified operand.  */
1407 extern unsigned char
1408 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);
1409 
1410 extern enum aarch64_operand_class
1411 aarch64_get_operand_class (enum aarch64_opnd);
1412 
1413 extern const char *
1414 aarch64_get_operand_name (enum aarch64_opnd);
1415 
1416 extern const char *
1417 aarch64_get_operand_desc (enum aarch64_opnd);
1418 
1419 extern bool
1420 aarch64_sve_dupm_mov_immediate_p (uint64_t, int);
1421 
1422 #ifdef DEBUG_AARCH64
1423 extern int debug_dump;
1424 
1425 extern void
1426 aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));
1427 
1428 #define DEBUG_TRACE(M, ...)					\
1429   {								\
1430     if (debug_dump)						\
1431       aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
1432   }
1433 
1434 #define DEBUG_TRACE_IF(C, M, ...)				\
1435   {								\
1436     if (debug_dump && (C))					\
1437       aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
1438   }
1439 #else  /* !DEBUG_AARCH64 */
1440 #define DEBUG_TRACE(M, ...) ;
1441 #define DEBUG_TRACE_IF(C, M, ...) ;
1442 #endif /* DEBUG_AARCH64 */
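
/* Illustrative sketch: with DEBUG_AARCH64 defined and debug_dump non-zero,
   a call such as

     DEBUG_TRACE ("decoding 0x%08x", (unsigned) word);

   expands to an aarch64_verbose call that prefixes the enclosing function
   name; without DEBUG_AARCH64 it expands to a lone ';' and costs nothing.
   The variable "word" is hypothetical.  */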
1443 
1444 extern const char *const aarch64_sve_pattern_array[32];
1445 extern const char *const aarch64_sve_prfop_array[16];
1446 
1447 #ifdef __cplusplus
1448 }
1449 #endif
1450 
1451 #endif /* OPCODE_AARCH64_H */
1452