1 /* AArch64 assembler/disassembler support. 2 3 Copyright (C) 2009-2024 Free Software Foundation, Inc. 4 Contributed by ARM Ltd. 5 6 This file is part of GNU Binutils. 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3 of the license, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; see the file COPYING3. If not, 20 see <http://www.gnu.org/licenses/>. */ 21 22 #ifndef OPCODE_AARCH64_H 23 #define OPCODE_AARCH64_H 24 25 #include "bfd.h" 26 #include <stdint.h> 27 #include <assert.h> 28 #include <stdlib.h> 29 30 #include "dis-asm.h" 31 32 #ifdef __cplusplus 33 extern "C" { 34 #endif 35 36 /* The offset for pc-relative addressing is currently defined to be 0. */ 37 #define AARCH64_PCREL_OFFSET 0 38 39 typedef uint32_t aarch64_insn; 40 41 /* An enum containing all known CPU features. The values act as bit positions 42 into aarch64_feature_set. */ 43 enum aarch64_feature_bit { 44 /* All processors. */ 45 AARCH64_FEATURE_V8, 46 /* ARMv8.6 processors. */ 47 AARCH64_FEATURE_V8_6A, 48 /* Bfloat16 insns. */ 49 AARCH64_FEATURE_BFLOAT16, 50 /* Armv8-A processors. */ 51 AARCH64_FEATURE_V8A, 52 /* SVE2 instructions. */ 53 AARCH64_FEATURE_SVE2, 54 /* ARMv8.2 processors. */ 55 AARCH64_FEATURE_V8_2A, 56 /* ARMv8.3 processors. */ 57 AARCH64_FEATURE_V8_3A, 58 AARCH64_FEATURE_SVE2_AES, 59 AARCH64_FEATURE_SVE2_BITPERM, 60 AARCH64_FEATURE_SVE2_SM4, 61 AARCH64_FEATURE_SVE2_SHA3, 62 /* ARMv8.4 processors. */ 63 AARCH64_FEATURE_V8_4A, 64 /* Armv8-R processors. */ 65 AARCH64_FEATURE_V8R, 66 /* Armv8.7 processors. */ 67 AARCH64_FEATURE_V8_7A, 68 /* Scalable Matrix Extension. */ 69 AARCH64_FEATURE_SME, 70 /* Atomic 64-byte load/store. */ 71 AARCH64_FEATURE_LS64, 72 /* v8.3 Pointer Authentication. */ 73 AARCH64_FEATURE_PAUTH, 74 /* FP instructions. */ 75 AARCH64_FEATURE_FP, 76 /* SIMD instructions. */ 77 AARCH64_FEATURE_SIMD, 78 /* CRC instructions. */ 79 AARCH64_FEATURE_CRC, 80 /* LSE instructions. */ 81 AARCH64_FEATURE_LSE, 82 /* PAN instructions. */ 83 AARCH64_FEATURE_PAN, 84 /* LOR instructions. */ 85 AARCH64_FEATURE_LOR, 86 /* v8.1 SIMD instructions. */ 87 AARCH64_FEATURE_RDMA, 88 /* v8.1 features. */ 89 AARCH64_FEATURE_V8_1A, 90 /* v8.2 FP16 instructions. */ 91 AARCH64_FEATURE_F16, 92 /* RAS Extensions. */ 93 AARCH64_FEATURE_RAS, 94 /* Statistical Profiling. */ 95 AARCH64_FEATURE_PROFILE, 96 /* SVE instructions. */ 97 AARCH64_FEATURE_SVE, 98 /* RCPC instructions. */ 99 AARCH64_FEATURE_RCPC, 100 /* RCPC2 instructions. */ 101 AARCH64_FEATURE_RCPC2, 102 /* Complex # instructions. */ 103 AARCH64_FEATURE_COMPNUM, 104 /* JavaScript conversion instructions. */ 105 AARCH64_FEATURE_JSCVT, 106 /* Dot Product instructions. */ 107 AARCH64_FEATURE_DOTPROD, 108 /* SM3 & SM4 instructions. */ 109 AARCH64_FEATURE_SM4, 110 /* SHA2 instructions. */ 111 AARCH64_FEATURE_SHA2, 112 /* SHA3 instructions. */ 113 AARCH64_FEATURE_SHA3, 114 /* AES instructions. */ 115 AARCH64_FEATURE_AES, 116 /* v8.2 FP16FML ins. */ 117 AARCH64_FEATURE_F16_FML, 118 /* ARMv8.5 processors. */ 119 AARCH64_FEATURE_V8_5A, 120 /* v8.5 Flag Manipulation version 2. */ 121 AARCH64_FEATURE_FLAGMANIP, 122 /* FRINT[32,64][Z,X] insns. */ 123 AARCH64_FEATURE_FRINTTS, 124 /* SB instruction. */ 125 AARCH64_FEATURE_SB, 126 /* Execution and Data Prediction Restriction instructions. */ 127 AARCH64_FEATURE_PREDRES, 128 /* DC CVADP. */ 129 AARCH64_FEATURE_CVADP, 130 /* Random Number instructions. */ 131 AARCH64_FEATURE_RNG, 132 /* SCXTNUM_ELx. */ 133 AARCH64_FEATURE_SCXTNUM, 134 /* ID_PFR2 instructions. */ 135 AARCH64_FEATURE_ID_PFR2, 136 /* SSBS mechanism enabled. */ 137 AARCH64_FEATURE_SSBS, 138 /* Memory Tagging Extension. */ 139 AARCH64_FEATURE_MEMTAG, 140 /* Transactional Memory Extension. */ 141 AARCH64_FEATURE_TME, 142 /* XS memory attribute. */ 143 AARCH64_FEATURE_XS, 144 /* WFx instructions with timeout. */ 145 AARCH64_FEATURE_WFXT, 146 /* Standardization of memory operations. */ 147 AARCH64_FEATURE_MOPS, 148 /* Hinted conditional branches. */ 149 AARCH64_FEATURE_HBC, 150 /* Matrix Multiply instructions. */ 151 AARCH64_FEATURE_I8MM, 152 AARCH64_FEATURE_F32MM, 153 AARCH64_FEATURE_F64MM, 154 /* v8.4 Flag Manipulation. */ 155 AARCH64_FEATURE_FLAGM, 156 /* Armv9.0-A processors. */ 157 AARCH64_FEATURE_V9A, 158 /* SME F64F64. */ 159 AARCH64_FEATURE_SME_F64F64, 160 /* SME I16I64. */ 161 AARCH64_FEATURE_SME_I16I64, 162 /* Armv8.8 processors. */ 163 AARCH64_FEATURE_V8_8A, 164 /* Common Short Sequence Compression instructions. */ 165 AARCH64_FEATURE_CSSC, 166 /* Armv8.9-A processors. */ 167 AARCH64_FEATURE_V8_9A, 168 /* Check Feature Status Extension. */ 169 AARCH64_FEATURE_CHK, 170 /* Guarded Control Stack. */ 171 AARCH64_FEATURE_GCS, 172 /* SPE Call Return branch records. */ 173 AARCH64_FEATURE_SPE_CRR, 174 /* SPE Filter by data source. */ 175 AARCH64_FEATURE_SPE_FDS, 176 /* Additional SPE events. */ 177 AARCH64_FEATURE_SPEv1p4, 178 /* SME2. */ 179 AARCH64_FEATURE_SME2, 180 /* Translation Hardening Extension. */ 181 AARCH64_FEATURE_THE, 182 /* LSE128. */ 183 AARCH64_FEATURE_LSE128, 184 /* ARMv8.9-A RAS Extensions. */ 185 AARCH64_FEATURE_RASv2, 186 /* System Control Register2. */ 187 AARCH64_FEATURE_SCTLR2, 188 /* Fine Grained Traps. */ 189 AARCH64_FEATURE_FGT2, 190 /* Physical Fault Address. */ 191 AARCH64_FEATURE_PFAR, 192 /* Address Translate Stage 1. */ 193 AARCH64_FEATURE_ATS1A, 194 /* Memory Attribute Index Enhancement. */ 195 AARCH64_FEATURE_AIE, 196 /* Stage 1 Permission Indirection Extension. */ 197 AARCH64_FEATURE_S1PIE, 198 /* Stage 2 Permission Indirection Extension. */ 199 AARCH64_FEATURE_S2PIE, 200 /* Stage 1 Permission Overlay Extension. */ 201 AARCH64_FEATURE_S1POE, 202 /* Stage 2 Permission Overlay Extension. */ 203 AARCH64_FEATURE_S2POE, 204 /* Extension to Translation Control Registers. */ 205 AARCH64_FEATURE_TCR2, 206 /* Speculation Prediction Restriction instructions. */ 207 AARCH64_FEATURE_PREDRES2, 208 /* Instrumentation Extension. */ 209 AARCH64_FEATURE_ITE, 210 /* 128-bit page table descriptor, system registers 211 and isntructions. */ 212 AARCH64_FEATURE_D128, 213 /* Armv8.9-A/Armv9.4-A architecture Debug extension. */ 214 AARCH64_FEATURE_DEBUGv8p9, 215 /* Performance Monitors Extension. */ 216 AARCH64_FEATURE_PMUv3p9, 217 /* Performance Monitors Snapshots Extension. */ 218 AARCH64_FEATURE_PMUv3_SS, 219 /* Performance Monitors Instruction Counter Extension. */ 220 AARCH64_FEATURE_PMUv3_ICNTR, 221 /* System Performance Monitors Extension */ 222 AARCH64_FEATURE_SPMU, 223 /* Performance Monitors Synchronous-Exception-Based Event Extension. */ 224 AARCH64_FEATURE_SEBEP, 225 /* SVE2.1 and SME2.1 non-widening BFloat16 instructions. */ 226 AARCH64_FEATURE_B16B16, 227 /* SME2.1 instructions. */ 228 AARCH64_FEATURE_SME2p1, 229 /* SVE2.1 instructions. */ 230 AARCH64_FEATURE_SVE2p1, 231 /* RCPC3 instructions. */ 232 AARCH64_FEATURE_RCPC3, 233 /* Checked Pointer Arithmetic instructions. */ 234 AARCH64_FEATURE_CPA, 235 /* FAMINMAX instructions. */ 236 AARCH64_FEATURE_FAMINMAX, 237 /* FP8 instructions. */ 238 AARCH64_FEATURE_FP8, 239 AARCH64_NUM_FEATURES 240 }; 241 242 /* These macros take an initial argument X that gives the index into 243 an aarch64_feature_set. The macros then return the bitmask for 244 that array index. */ 245 246 /* A mask in which feature bit BIT is set and all other bits are clear. */ 247 #define AARCH64_UINT64_BIT(X, BIT) \ 248 ((X) == (BIT) / 64 ? 1ULL << (BIT) % 64 : 0) 249 250 /* A mask that includes only AARCH64_FEATURE_<NAME>. */ 251 #define AARCH64_FEATBIT(X, NAME) \ 252 AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME) 253 254 /* A mask of the features that are enabled by each architecture version, 255 excluding those that are inherited from other architecture versions. */ 256 #define AARCH64_ARCH_V8A_FEATURES(X) (AARCH64_FEATBIT (X, V8A) \ 257 | AARCH64_FEATBIT (X, FP) \ 258 | AARCH64_FEATBIT (X, RAS) \ 259 | AARCH64_FEATBIT (X, SIMD) \ 260 | AARCH64_FEATBIT (X, CHK)) 261 #define AARCH64_ARCH_V8_1A_FEATURES(X) (AARCH64_FEATBIT (X, V8_1A) \ 262 | AARCH64_FEATBIT (X, CRC) \ 263 | AARCH64_FEATBIT (X, LSE) \ 264 | AARCH64_FEATBIT (X, PAN) \ 265 | AARCH64_FEATBIT (X, LOR) \ 266 | AARCH64_FEATBIT (X, RDMA)) 267 #define AARCH64_ARCH_V8_2A_FEATURES(X) (AARCH64_FEATBIT (X, V8_2A)) 268 #define AARCH64_ARCH_V8_3A_FEATURES(X) (AARCH64_FEATBIT (X, V8_3A) \ 269 | AARCH64_FEATBIT (X, PAUTH) \ 270 | AARCH64_FEATBIT (X, RCPC) \ 271 | AARCH64_FEATBIT (X, COMPNUM) \ 272 | AARCH64_FEATBIT (X, JSCVT)) 273 #define AARCH64_ARCH_V8_4A_FEATURES(X) (AARCH64_FEATBIT (X, V8_4A) \ 274 | AARCH64_FEATBIT (X, RCPC2) \ 275 | AARCH64_FEATBIT (X, DOTPROD) \ 276 | AARCH64_FEATBIT (X, FLAGM) \ 277 | AARCH64_FEATBIT (X, F16_FML)) 278 #define AARCH64_ARCH_V8_5A_FEATURES(X) (AARCH64_FEATBIT (X, V8_5A) \ 279 | AARCH64_FEATBIT (X, FLAGMANIP) \ 280 | AARCH64_FEATBIT (X, FRINTTS) \ 281 | AARCH64_FEATBIT (X, SB) \ 282 | AARCH64_FEATBIT (X, PREDRES) \ 283 | AARCH64_FEATBIT (X, CVADP) \ 284 | AARCH64_FEATBIT (X, SCXTNUM) \ 285 | AARCH64_FEATBIT (X, ID_PFR2) \ 286 | AARCH64_FEATBIT (X, SSBS)) 287 #define AARCH64_ARCH_V8_6A_FEATURES(X) (AARCH64_FEATBIT (X, V8_6A) \ 288 | AARCH64_FEATBIT (X, BFLOAT16) \ 289 | AARCH64_FEATBIT (X, I8MM)) 290 #define AARCH64_ARCH_V8_7A_FEATURES(X) (AARCH64_FEATBIT (X, V8_7A) \ 291 | AARCH64_FEATBIT (X, XS) \ 292 | AARCH64_FEATBIT (X, WFXT) \ 293 | AARCH64_FEATBIT (X, LS64)) 294 #define AARCH64_ARCH_V8_8A_FEATURES(X) (AARCH64_FEATBIT (X, V8_8A) \ 295 | AARCH64_FEATBIT (X, MOPS) \ 296 | AARCH64_FEATBIT (X, HBC)) 297 #define AARCH64_ARCH_V8_9A_FEATURES(X) (AARCH64_FEATBIT (X, V8_9A) \ 298 | AARCH64_FEATBIT (X, SPEv1p4) \ 299 | AARCH64_FEATBIT (X, SPE_CRR) \ 300 | AARCH64_FEATBIT (X, SPE_FDS) \ 301 | AARCH64_FEATBIT (X, RASv2) \ 302 | AARCH64_FEATBIT (X, SCTLR2) \ 303 | AARCH64_FEATBIT (X, FGT2) \ 304 | AARCH64_FEATBIT (X, PFAR) \ 305 | AARCH64_FEATBIT (X, ATS1A) \ 306 | AARCH64_FEATBIT (X, AIE) \ 307 | AARCH64_FEATBIT (X, S1PIE) \ 308 | AARCH64_FEATBIT (X, S2PIE) \ 309 | AARCH64_FEATBIT (X, S1POE) \ 310 | AARCH64_FEATBIT (X, S2POE) \ 311 | AARCH64_FEATBIT (X, TCR2) \ 312 | AARCH64_FEATBIT (X, DEBUGv8p9) \ 313 | AARCH64_FEATBIT (X, PMUv3p9) \ 314 | AARCH64_FEATBIT (X, PMUv3_SS) \ 315 | AARCH64_FEATBIT (X, PMUv3_ICNTR) \ 316 | AARCH64_FEATBIT (X, SPMU) \ 317 | AARCH64_FEATBIT (X, SEBEP) \ 318 | AARCH64_FEATBIT (X, PREDRES2) \ 319 ) 320 321 #define AARCH64_ARCH_V9A_FEATURES(X) (AARCH64_FEATBIT (X, V9A) \ 322 | AARCH64_FEATBIT (X, F16) \ 323 | AARCH64_FEATBIT (X, SVE) \ 324 | AARCH64_FEATBIT (X, SVE2)) 325 #define AARCH64_ARCH_V9_1A_FEATURES(X) AARCH64_ARCH_V8_6A_FEATURES (X) 326 #define AARCH64_ARCH_V9_2A_FEATURES(X) AARCH64_ARCH_V8_7A_FEATURES (X) 327 #define AARCH64_ARCH_V9_3A_FEATURES(X) AARCH64_ARCH_V8_8A_FEATURES (X) 328 #define AARCH64_ARCH_V9_4A_FEATURES(X) AARCH64_ARCH_V8_9A_FEATURES (X) 329 330 /* Architectures are the sum of the base and extensions. */ 331 #define AARCH64_ARCH_V8A(X) (AARCH64_FEATBIT (X, V8) \ 332 | AARCH64_ARCH_V8A_FEATURES (X)) 333 #define AARCH64_ARCH_V8_1A(X) (AARCH64_ARCH_V8A (X) \ 334 | AARCH64_ARCH_V8_1A_FEATURES (X)) 335 #define AARCH64_ARCH_V8_2A(X) (AARCH64_ARCH_V8_1A (X) \ 336 | AARCH64_ARCH_V8_2A_FEATURES (X)) 337 #define AARCH64_ARCH_V8_3A(X) (AARCH64_ARCH_V8_2A (X) \ 338 | AARCH64_ARCH_V8_3A_FEATURES (X)) 339 #define AARCH64_ARCH_V8_4A(X) (AARCH64_ARCH_V8_3A (X) \ 340 | AARCH64_ARCH_V8_4A_FEATURES (X)) 341 #define AARCH64_ARCH_V8_5A(X) (AARCH64_ARCH_V8_4A (X) \ 342 | AARCH64_ARCH_V8_5A_FEATURES (X)) 343 #define AARCH64_ARCH_V8_6A(X) (AARCH64_ARCH_V8_5A (X) \ 344 | AARCH64_ARCH_V8_6A_FEATURES (X)) 345 #define AARCH64_ARCH_V8_7A(X) (AARCH64_ARCH_V8_6A (X) \ 346 | AARCH64_ARCH_V8_7A_FEATURES (X)) 347 #define AARCH64_ARCH_V8_8A(X) (AARCH64_ARCH_V8_7A (X) \ 348 | AARCH64_ARCH_V8_8A_FEATURES (X)) 349 #define AARCH64_ARCH_V8_9A(X) (AARCH64_ARCH_V8_8A (X) \ 350 | AARCH64_ARCH_V8_9A_FEATURES (X)) 351 #define AARCH64_ARCH_V8R(X) ((AARCH64_ARCH_V8_4A (X) \ 352 | AARCH64_FEATBIT (X, V8R)) \ 353 & ~AARCH64_FEATBIT (X, V8A) \ 354 & ~AARCH64_FEATBIT (X, LOR)) 355 356 #define AARCH64_ARCH_V9A(X) (AARCH64_ARCH_V8_5A (X) \ 357 | AARCH64_ARCH_V9A_FEATURES (X)) 358 #define AARCH64_ARCH_V9_1A(X) (AARCH64_ARCH_V9A (X) \ 359 | AARCH64_ARCH_V9_1A_FEATURES (X)) 360 #define AARCH64_ARCH_V9_2A(X) (AARCH64_ARCH_V9_1A (X) \ 361 | AARCH64_ARCH_V9_2A_FEATURES (X)) 362 #define AARCH64_ARCH_V9_3A(X) (AARCH64_ARCH_V9_2A (X) \ 363 | AARCH64_ARCH_V9_3A_FEATURES (X)) 364 #define AARCH64_ARCH_V9_4A(X) (AARCH64_ARCH_V9_3A (X) \ 365 | AARCH64_ARCH_V9_4A_FEATURES (X)) 366 367 #define AARCH64_ARCH_NONE(X) 0 368 369 /* CPU-specific features. */ 370 typedef struct { 371 uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64]; 372 } aarch64_feature_set; 373 374 #define AARCH64_CPU_HAS_FEATURE(CPU,FEAT) \ 375 ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0 \ 376 && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0) 377 378 #define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT) \ 379 ((~(CPU).flags[0] & (FEAT).flags[0]) == 0 \ 380 && (~(CPU).flags[1] & (FEAT).flags[1]) == 0) 381 382 #define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT) \ 383 (((CPU).flags[0] & (FEAT).flags[0]) != 0 \ 384 || ((CPU).flags[1] & (FEAT).flags[1]) != 0) 385 386 #define AARCH64_SET_FEATURE(DEST, FEAT) \ 387 ((DEST).flags[0] = FEAT (0), \ 388 (DEST).flags[1] = FEAT (1)) 389 390 #define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT) \ 391 ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \ 392 (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT)) 393 394 #define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2) \ 395 do \ 396 { \ 397 (TARG).flags[0] = (F1).flags[0] | (F2).flags[0]; \ 398 (TARG).flags[1] = (F1).flags[1] | (F2).flags[1]; \ 399 } \ 400 while (0) 401 402 #define AARCH64_CLEAR_FEATURES(TARG,F1,F2) \ 403 do \ 404 { \ 405 (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0]; \ 406 (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1]; \ 407 } \ 408 while (0) 409 410 /* aarch64_feature_set initializers for no features and all features, 411 respectively. */ 412 #define AARCH64_NO_FEATURES { { 0, 0 } } 413 #define AARCH64_ALL_FEATURES { { -1, -1 } } 414 415 /* An aarch64_feature_set initializer for a single feature, 416 AARCH64_FEATURE_<FEAT>. */ 417 #define AARCH64_FEATURE(FEAT) \ 418 { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } } 419 420 /* An aarch64_feature_set initializer for a specific architecture version, 421 including all the features that are enabled by default for that architecture 422 version. */ 423 #define AARCH64_ARCH_FEATURES(ARCH) \ 424 { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } } 425 426 /* Used by AARCH64_CPU_FEATURES. */ 427 #define AARCH64_OR_FEATURES_1(X, ARCH, F1) \ 428 (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X)) 429 #define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \ 430 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2)) 431 #define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \ 432 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__)) 433 #define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \ 434 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__)) 435 #define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \ 436 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__)) 437 #define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \ 438 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__)) 439 #define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \ 440 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__)) 441 #define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \ 442 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__)) 443 #define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \ 444 (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__)) 445 446 /* An aarch64_feature_set initializer for a CPU that implements architecture 447 version ARCH, and additionally provides the N features listed in "...". */ 448 #define AARCH64_CPU_FEATURES(ARCH, N, ...) \ 449 { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__), \ 450 AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } } 451 452 /* An aarch64_feature_set initializer for the N features listed in "...". */ 453 #define AARCH64_FEATURES(N, ...) \ 454 AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__) 455 456 enum aarch64_operand_class 457 { 458 AARCH64_OPND_CLASS_NIL, 459 AARCH64_OPND_CLASS_INT_REG, 460 AARCH64_OPND_CLASS_MODIFIED_REG, 461 AARCH64_OPND_CLASS_FP_REG, 462 AARCH64_OPND_CLASS_SIMD_REG, 463 AARCH64_OPND_CLASS_SIMD_ELEMENT, 464 AARCH64_OPND_CLASS_SISD_REG, 465 AARCH64_OPND_CLASS_SIMD_REGLIST, 466 AARCH64_OPND_CLASS_SVE_REG, 467 AARCH64_OPND_CLASS_SVE_REGLIST, 468 AARCH64_OPND_CLASS_PRED_REG, 469 AARCH64_OPND_CLASS_ZA_ACCESS, 470 AARCH64_OPND_CLASS_ADDRESS, 471 AARCH64_OPND_CLASS_IMMEDIATE, 472 AARCH64_OPND_CLASS_SYSTEM, 473 AARCH64_OPND_CLASS_COND, 474 }; 475 476 /* Operand code that helps both parsing and coding. 477 Keep AARCH64_OPERANDS synced. */ 478 479 enum aarch64_opnd 480 { 481 AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!*/ 482 483 AARCH64_OPND_Rd, /* Integer register as destination. */ 484 AARCH64_OPND_Rn, /* Integer register as source. */ 485 AARCH64_OPND_Rm, /* Integer register as source. */ 486 AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */ 487 AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */ 488 AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */ 489 AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */ 490 AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */ 491 AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */ 492 AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */ 493 AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */ 494 495 AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */ 496 AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */ 497 AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */ 498 AARCH64_OPND_PAIRREG, /* Paired register operand. */ 499 AARCH64_OPND_PAIRREG_OR_XZR, /* Paired register operand, optionally xzr. */ 500 AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */ 501 AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */ 502 AARCH64_OPND_Rm_LSL, /* Integer Rm shifted (LSL-only). */ 503 504 AARCH64_OPND_Fd, /* Floating-point Fd. */ 505 AARCH64_OPND_Fn, /* Floating-point Fn. */ 506 AARCH64_OPND_Fm, /* Floating-point Fm. */ 507 AARCH64_OPND_Fa, /* Floating-point Fa. */ 508 AARCH64_OPND_Ft, /* Floating-point Ft. */ 509 AARCH64_OPND_Ft2, /* Floating-point Ft2. */ 510 511 AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */ 512 AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */ 513 AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */ 514 515 AARCH64_OPND_Va, /* AdvSIMD Vector Va. */ 516 AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */ 517 AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */ 518 AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */ 519 AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */ 520 AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */ 521 AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */ 522 AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */ 523 AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */ 524 AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when 525 qualifier is S_H. */ 526 AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */ 527 AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */ 528 AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single 529 structure to all lanes. */ 530 AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */ 531 532 AARCH64_OPND_CRn, /* Co-processor register in CRn field. */ 533 AARCH64_OPND_CRm, /* Co-processor register in CRm field. */ 534 535 AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */ 536 AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */ 537 AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */ 538 AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */ 539 AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */ 540 AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */ 541 AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate. */ 542 AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction 543 (no encoding). */ 544 AARCH64_OPND_IMM0, /* Immediate for #0. */ 545 AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */ 546 AARCH64_OPND_FPIMM, /* Floating-point Immediate. */ 547 AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */ 548 AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */ 549 AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */ 550 AARCH64_OPND_IMM, /* Immediate. */ 551 AARCH64_OPND_IMM_2, /* Immediate. */ 552 AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */ 553 AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */ 554 AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */ 555 AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg. */ 556 AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */ 557 AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */ 558 AARCH64_OPND_BIT_NUM, /* Immediate. */ 559 AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */ 560 AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */ 561 AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */ 562 AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */ 563 AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for 564 each condition flag. */ 565 566 AARCH64_OPND_LIMM, /* Logical Immediate. */ 567 AARCH64_OPND_AIMM, /* Arithmetic immediate. */ 568 AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */ 569 AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */ 570 AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */ 571 AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */ 572 AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */ 573 AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */ 574 575 AARCH64_OPND_COND, /* Standard condition as the last operand. */ 576 AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */ 577 578 AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */ 579 AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */ 580 AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */ 581 AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */ 582 AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */ 583 584 AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */ 585 AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */ 586 AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */ 587 AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */ 588 AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is 589 negative or unaligned and there is 590 no writeback allowed. This operand code 591 is only used to support the programmer- 592 friendly feature of using LDR/STR as the 593 the mnemonic name for LDUR/STUR instructions 594 wherever there is no ambiguity. */ 595 AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */ 596 AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of 597 16) immediate. */ 598 AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */ 599 AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of 600 16) immediate. */ 601 AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */ 602 AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */ 603 AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */ 604 605 AARCH64_OPND_SYSREG, /* System register operand. */ 606 AARCH64_OPND_SYSREG128, /* 128-bit system register operand. */ 607 AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */ 608 AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */ 609 AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */ 610 AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */ 611 AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */ 612 AARCH64_OPND_SYSREG_TLBIP, /* System register <tlbip_op> operand. */ 613 AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */ 614 AARCH64_OPND_BARRIER, /* Barrier operand. */ 615 AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */ 616 AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */ 617 AARCH64_OPND_PRFOP, /* Prefetch operation. */ 618 AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */ 619 AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */ 620 AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */ 621 AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */ 622 AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */ 623 AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */ 624 AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */ 625 AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */ 626 AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */ 627 AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */ 628 AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */ 629 AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */ 630 AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */ 631 AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */ 632 AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */ 633 AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */ 634 AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */ 635 AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */ 636 AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>]. */ 637 AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */ 638 AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */ 639 AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */ 640 AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */ 641 AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */ 642 AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */ 643 AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */ 644 AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */ 645 AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */ 646 AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */ 647 AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */ 648 AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */ 649 AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */ 650 AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */ 651 AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW]. 652 Bit 14 controls S/U choice. */ 653 AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW]. 654 Bit 22 controls S/U choice. */ 655 AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1]. 656 Bit 14 controls S/U choice. */ 657 AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1]. 658 Bit 22 controls S/U choice. */ 659 AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2]. 660 Bit 14 controls S/U choice. */ 661 AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2]. 662 Bit 22 controls S/U choice. */ 663 AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3]. 664 Bit 14 controls S/U choice. */ 665 AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3]. 666 Bit 22 controls S/U choice. */ 667 AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */ 668 AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */ 669 AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */ 670 AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */ 671 AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */ 672 AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */ 673 AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */ 674 AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */ 675 AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */ 676 AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */ 677 AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */ 678 AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */ 679 AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */ 680 AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */ 681 AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */ 682 AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */ 683 AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */ 684 AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */ 685 AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */ 686 AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */ 687 AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */ 688 AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */ 689 AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */ 690 AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */ 691 AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */ 692 AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */ 693 AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */ 694 AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */ 695 AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */ 696 AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */ 697 AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */ 698 AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */ 699 AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */ 700 AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */ 701 AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */ 702 AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */ 703 AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */ 704 AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */ 705 AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */ 706 AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */ 707 AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */ 708 AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */ 709 AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */ 710 AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */ 711 AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */ 712 AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */ 713 AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */ 714 AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */ 715 AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */ 716 AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */ 717 AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */ 718 AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */ 719 AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */ 720 AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */ 721 AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B). */ 722 AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H). */ 723 AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S). */ 724 AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D). */ 725 AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B). */ 726 AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H). */ 727 AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */ 728 AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D). */ 729 AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */ 730 AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */ 731 AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */ 732 AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */ 733 AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */ 734 AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */ 735 AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */ 736 AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */ 737 AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */ 738 AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */ 739 AARCH64_OPND_SVE_Zm_imm4, /* SVE vector register with 4bit index. */ 740 AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */ 741 AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */ 742 AARCH64_OPND_SVE_Zn_5_INDEX, /* Indexed SVE vector register, for DUPQ. */ 743 AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */ 744 AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */ 745 AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */ 746 AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */ 747 AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */ 748 AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */ 749 AARCH64_OPND_SME_Zm, /* SVE vector register list in 4-bit Zm. */ 750 AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */ 751 AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */ 752 AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */ 753 AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */ 754 AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */ 755 AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */ 756 AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */ 757 AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */ 758 AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */ 759 AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */ 760 AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */ 761 AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */ 762 AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */ 763 AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */ 764 AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */ 765 AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */ 766 AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */ 767 AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */ 768 AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */ 769 AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */ 770 AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */ 771 AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */ 772 AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */ 773 AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */ 774 AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */ 775 AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */ 776 AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */ 777 AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */ 778 AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */ 779 AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */ 780 AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */ 781 AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */ 782 AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */ 783 AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */ 784 AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */ 785 AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */ 786 AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */ 787 AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */ 788 AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */ 789 AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */ 790 AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */ 791 AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */ 792 AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */ 793 AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */ 794 AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */ 795 AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */ 796 AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */ 797 AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */ 798 AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */ 799 AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */ 800 AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */ 801 AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */ 802 AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */ 803 AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */ 804 AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */ 805 AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */ 806 AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */ 807 AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */ 808 AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */ 809 AARCH64_OPND_SME_Zt2, /* Qobule SVE vector register list. */ 810 AARCH64_OPND_SME_Zt3, /* Trible SVE vector register list. */ 811 AARCH64_OPND_SME_Zt4, /* Quad SVE vector register list. */ 812 AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND, /* [<Xn|SP>]{, #<imm>}. */ 813 AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!. */ 814 AARCH64_OPND_RCPC3_ADDR_POSTIND, /* [<Xn|SP>], #<imm>. */ 815 AARCH64_OPND_RCPC3_ADDR_PREIND_WB, /* [<Xn|SP>, #<imm>]!. */ 816 AARCH64_OPND_RCPC3_ADDR_OFFSET 817 }; 818 819 /* Qualifier constrains an operand. It either specifies a variant of an 820 operand type or limits values available to an operand type. 821 822 N.B. Order is important; keep aarch64_opnd_qualifiers synced. */ 823 824 enum aarch64_opnd_qualifier 825 { 826 /* Indicating no further qualification on an operand. */ 827 AARCH64_OPND_QLF_NIL, 828 829 /* Qualifying an operand which is a general purpose (integer) register; 830 indicating the operand data size or a specific register. */ 831 AARCH64_OPND_QLF_W, /* Wn, WZR or WSP. */ 832 AARCH64_OPND_QLF_X, /* Xn, XZR or XSP. */ 833 AARCH64_OPND_QLF_WSP, /* WSP. */ 834 AARCH64_OPND_QLF_SP, /* SP. */ 835 836 /* Qualifying an operand which is a floating-point register, a SIMD 837 vector element or a SIMD vector element list; indicating operand data 838 size or the size of each SIMD vector element in the case of a SIMD 839 vector element list. 840 These qualifiers are also used to qualify an address operand to 841 indicate the size of data element a load/store instruction is 842 accessing. 843 They are also used for the immediate shift operand in e.g. SSHR. Such 844 a use is only for the ease of operand encoding/decoding and qualifier 845 sequence matching; such a use should not be applied widely; use the value 846 constraint qualifiers for immediate operands wherever possible. */ 847 AARCH64_OPND_QLF_S_B, 848 AARCH64_OPND_QLF_S_H, 849 AARCH64_OPND_QLF_S_S, 850 AARCH64_OPND_QLF_S_D, 851 AARCH64_OPND_QLF_S_Q, 852 /* These type qualifiers have a special meaning in that they mean 4 x 1 byte 853 or 2 x 2 byte are selected by the instruction. Other than that they have 854 no difference with AARCH64_OPND_QLF_S_B in encoding. They are here purely 855 for syntactical reasons and is an exception from normal AArch64 856 disassembly scheme. */ 857 AARCH64_OPND_QLF_S_4B, 858 AARCH64_OPND_QLF_S_2H, 859 860 /* Qualifying an operand which is a SIMD vector register or a SIMD vector 861 register list; indicating register shape. 862 They are also used for the immediate shift operand in e.g. SSHR. Such 863 a use is only for the ease of operand encoding/decoding and qualifier 864 sequence matching; such a use should not be applied widely; use the value 865 constraint qualifiers for immediate operands wherever possible. */ 866 AARCH64_OPND_QLF_V_4B, 867 AARCH64_OPND_QLF_V_8B, 868 AARCH64_OPND_QLF_V_16B, 869 AARCH64_OPND_QLF_V_2H, 870 AARCH64_OPND_QLF_V_4H, 871 AARCH64_OPND_QLF_V_8H, 872 AARCH64_OPND_QLF_V_2S, 873 AARCH64_OPND_QLF_V_4S, 874 AARCH64_OPND_QLF_V_1D, 875 AARCH64_OPND_QLF_V_2D, 876 AARCH64_OPND_QLF_V_1Q, 877 878 AARCH64_OPND_QLF_P_Z, 879 AARCH64_OPND_QLF_P_M, 880 881 /* Used in scaled signed immediate that are scaled by a Tag granule 882 like in stg, st2g, etc. */ 883 AARCH64_OPND_QLF_imm_tag, 884 885 /* Constraint on value. */ 886 AARCH64_OPND_QLF_CR, /* CRn, CRm. */ 887 AARCH64_OPND_QLF_imm_0_7, 888 AARCH64_OPND_QLF_imm_0_15, 889 AARCH64_OPND_QLF_imm_0_31, 890 AARCH64_OPND_QLF_imm_0_63, 891 AARCH64_OPND_QLF_imm_1_32, 892 AARCH64_OPND_QLF_imm_1_64, 893 894 /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros 895 or shift-ones. */ 896 AARCH64_OPND_QLF_LSL, 897 AARCH64_OPND_QLF_MSL, 898 899 /* Special qualifier helping retrieve qualifier information during the 900 decoding time (currently not in use). */ 901 AARCH64_OPND_QLF_RETRIEVE, 902 903 /* Special qualifier used for indicating error in qualifier retrieval. */ 904 AARCH64_OPND_QLF_ERR, 905 }; 906 907 /* Instruction class. */ 908 909 enum aarch64_insn_class 910 { 911 aarch64_misc, 912 addsub_carry, 913 addsub_ext, 914 addsub_imm, 915 addsub_shift, 916 asimdall, 917 asimddiff, 918 asimdelem, 919 asimdext, 920 asimdimm, 921 asimdins, 922 asimdmisc, 923 asimdperm, 924 asimdsame, 925 asimdshf, 926 asimdtbl, 927 asisddiff, 928 asisdelem, 929 asisdlse, 930 asisdlsep, 931 asisdlso, 932 asisdlsop, 933 asisdmisc, 934 asisdone, 935 asisdpair, 936 asisdsame, 937 asisdshf, 938 bitfield, 939 branch_imm, 940 branch_reg, 941 compbranch, 942 condbranch, 943 condcmp_imm, 944 condcmp_reg, 945 condsel, 946 cryptoaes, 947 cryptosha2, 948 cryptosha3, 949 dp_1src, 950 dp_2src, 951 dp_3src, 952 exception, 953 extract, 954 float2fix, 955 float2int, 956 floatccmp, 957 floatcmp, 958 floatdp1, 959 floatdp2, 960 floatdp3, 961 floatimm, 962 floatsel, 963 ldst_immpost, 964 ldst_immpre, 965 ldst_imm9, /* immpost or immpre */ 966 ldst_imm10, /* LDRAA/LDRAB */ 967 ldst_pos, 968 ldst_regoff, 969 ldst_unpriv, 970 ldst_unscaled, 971 ldstexcl, 972 ldstnapair_offs, 973 ldstpair_off, 974 ldstpair_indexed, 975 loadlit, 976 log_imm, 977 log_shift, 978 lse_atomic, 979 lse128_atomic, 980 movewide, 981 pcreladdr, 982 ic_system, 983 sme_fp_sd, 984 sme_int_sd, 985 sme_misc, 986 sme_mov, 987 sme_ldr, 988 sme_psel, 989 sme_shift, 990 sme_size_12_bhs, 991 sme_size_12_hs, 992 sme_size_22, 993 sme_size_22_hsd, 994 sme_sz_23, 995 sme_str, 996 sme_start, 997 sme_stop, 998 sme2_mov, 999 sme2_movaz, 1000 sve_cpy, 1001 sve_index, 1002 sve_limm, 1003 sve_misc, 1004 sve_movprfx, 1005 sve_pred_zm, 1006 sve_shift_pred, 1007 sve_shift_unpred, 1008 sve_size_bhs, 1009 sve_size_bhsd, 1010 sve_size_hsd, 1011 sve_size_hsd2, 1012 sve_size_sd, 1013 sve_size_bh, 1014 sve_size_sd2, 1015 sve_size_13, 1016 sve_shift_tsz_hsd, 1017 sve_shift_tsz_bhsd, 1018 sve_size_tsz_bhs, 1019 testbranch, 1020 cryptosm3, 1021 cryptosm4, 1022 dotproduct, 1023 bfloat16, 1024 cssc, 1025 gcs, 1026 the, 1027 sve2_urqvs, 1028 sve_index1, 1029 rcpc3 1030 }; 1031 1032 /* Opcode enumerators. */ 1033 1034 enum aarch64_op 1035 { 1036 OP_NIL, 1037 OP_STRB_POS, 1038 OP_LDRB_POS, 1039 OP_LDRSB_POS, 1040 OP_STRH_POS, 1041 OP_LDRH_POS, 1042 OP_LDRSH_POS, 1043 OP_STR_POS, 1044 OP_LDR_POS, 1045 OP_STRF_POS, 1046 OP_LDRF_POS, 1047 OP_LDRSW_POS, 1048 OP_PRFM_POS, 1049 1050 OP_STURB, 1051 OP_LDURB, 1052 OP_LDURSB, 1053 OP_STURH, 1054 OP_LDURH, 1055 OP_LDURSH, 1056 OP_STUR, 1057 OP_LDUR, 1058 OP_STURV, 1059 OP_LDURV, 1060 OP_LDURSW, 1061 OP_PRFUM, 1062 1063 OP_LDR_LIT, 1064 OP_LDRV_LIT, 1065 OP_LDRSW_LIT, 1066 OP_PRFM_LIT, 1067 1068 OP_ADD, 1069 OP_B, 1070 OP_BL, 1071 1072 OP_MOVN, 1073 OP_MOVZ, 1074 OP_MOVK, 1075 1076 OP_MOV_IMM_LOG, /* MOV alias for moving bitmask immediate. */ 1077 OP_MOV_IMM_WIDE, /* MOV alias for moving wide immediate. */ 1078 OP_MOV_IMM_WIDEN, /* MOV alias for moving wide immediate (negated). */ 1079 1080 OP_MOV_V, /* MOV alias for moving vector register. */ 1081 1082 OP_ASR_IMM, 1083 OP_LSR_IMM, 1084 OP_LSL_IMM, 1085 1086 OP_BIC, 1087 1088 OP_UBFX, 1089 OP_BFXIL, 1090 OP_SBFX, 1091 OP_SBFIZ, 1092 OP_BFI, 1093 OP_BFC, /* ARMv8.2. */ 1094 OP_UBFIZ, 1095 OP_UXTB, 1096 OP_UXTH, 1097 OP_UXTW, 1098 1099 OP_CINC, 1100 OP_CINV, 1101 OP_CNEG, 1102 OP_CSET, 1103 OP_CSETM, 1104 1105 OP_FCVT, 1106 OP_FCVTN, 1107 OP_FCVTN2, 1108 OP_FCVTL, 1109 OP_FCVTL2, 1110 OP_FCVTXN_S, /* Scalar version. */ 1111 1112 OP_ROR_IMM, 1113 1114 OP_SXTL, 1115 OP_SXTL2, 1116 OP_UXTL, 1117 OP_UXTL2, 1118 1119 OP_MOV_P_P, 1120 OP_MOV_PN_PN, 1121 OP_MOV_Z_P_Z, 1122 OP_MOV_Z_V, 1123 OP_MOV_Z_Z, 1124 OP_MOV_Z_Zi, 1125 OP_MOVM_P_P_P, 1126 OP_MOVS_P_P, 1127 OP_MOVZS_P_P_P, 1128 OP_MOVZ_P_P_P, 1129 OP_NOTS_P_P_P_Z, 1130 OP_NOT_P_P_P_Z, 1131 1132 OP_FCMLA_ELEM, /* ARMv8.3, indexed element version. */ 1133 1134 OP_TOTAL_NUM, /* Pseudo. */ 1135 }; 1136 1137 /* Error types. */ 1138 enum err_type 1139 { 1140 ERR_OK, 1141 ERR_UND, 1142 ERR_UNP, 1143 ERR_NYI, 1144 ERR_VFI, 1145 ERR_NR_ENTRIES 1146 }; 1147 1148 /* Maximum number of operands an instruction can have. */ 1149 #define AARCH64_MAX_OPND_NUM 7 1150 /* Maximum number of qualifier sequences an instruction can have. */ 1151 #define AARCH64_MAX_QLF_SEQ_NUM 10 1152 /* Operand qualifier typedef; optimized for the size. */ 1153 typedef unsigned char aarch64_opnd_qualifier_t; 1154 /* Operand qualifier sequence typedef. */ 1155 typedef aarch64_opnd_qualifier_t \ 1156 aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM]; 1157 1158 /* FIXME: improve the efficiency. */ 1159 static inline bool 1160 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers) 1161 { 1162 int i; 1163 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 1164 if (qualifiers[i] != AARCH64_OPND_QLF_NIL) 1165 return false; 1166 return true; 1167 } 1168 1169 /* Forward declare error reporting type. */ 1170 typedef struct aarch64_operand_error aarch64_operand_error; 1171 /* Forward declare instruction sequence type. */ 1172 typedef struct aarch64_instr_sequence aarch64_instr_sequence; 1173 /* Forward declare instruction definition. */ 1174 typedef struct aarch64_inst aarch64_inst; 1175 1176 /* This structure holds information for a particular opcode. */ 1177 1178 struct aarch64_opcode 1179 { 1180 /* The name of the mnemonic. */ 1181 const char *name; 1182 1183 /* The opcode itself. Those bits which will be filled in with 1184 operands are zeroes. */ 1185 aarch64_insn opcode; 1186 1187 /* The opcode mask. This is used by the disassembler. This is a 1188 mask containing ones indicating those bits which must match the 1189 opcode field, and zeroes indicating those bits which need not 1190 match (and are presumably filled in by operands). */ 1191 aarch64_insn mask; 1192 1193 /* Instruction class. */ 1194 enum aarch64_insn_class iclass; 1195 1196 /* Enumerator identifier. */ 1197 enum aarch64_op op; 1198 1199 /* Which architecture variant provides this instruction. */ 1200 const aarch64_feature_set *avariant; 1201 1202 /* An array of operand codes. Each code is an index into the 1203 operand table. They appear in the order which the operands must 1204 appear in assembly code, and are terminated by a zero. */ 1205 enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM]; 1206 1207 /* A list of operand qualifier code sequence. Each operand qualifier 1208 code qualifies the corresponding operand code. Each operand 1209 qualifier sequence specifies a valid opcode variant and related 1210 constraint on operands. */ 1211 aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM]; 1212 1213 /* Flags providing information about this instruction */ 1214 uint64_t flags; 1215 1216 /* Extra constraints on the instruction that the verifier checks. */ 1217 uint32_t constraints; 1218 1219 /* If nonzero, this operand and operand 0 are both registers and 1220 are required to have the same register number. */ 1221 unsigned char tied_operand; 1222 1223 /* If non-NULL, a function to verify that a given instruction is valid. */ 1224 enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn, 1225 bfd_vma, bool, aarch64_operand_error *, 1226 struct aarch64_instr_sequence *); 1227 }; 1228 1229 typedef struct aarch64_opcode aarch64_opcode; 1230 1231 /* Table describing all the AArch64 opcodes. */ 1232 extern const aarch64_opcode aarch64_opcode_table[]; 1233 1234 /* Opcode flags. */ 1235 #define F_ALIAS (1 << 0) 1236 #define F_HAS_ALIAS (1 << 1) 1237 /* Disassembly preference priority 1-3 (the larger the higher). If nothing 1238 is specified, it is the priority 0 by default, i.e. the lowest priority. */ 1239 #define F_P1 (1 << 2) 1240 #define F_P2 (2 << 2) 1241 #define F_P3 (3 << 2) 1242 /* Flag an instruction that is truly conditional executed, e.g. b.cond. */ 1243 #define F_COND (1 << 4) 1244 /* Instruction has the field of 'sf'. */ 1245 #define F_SF (1 << 5) 1246 /* Instruction has the field of 'size:Q'. */ 1247 #define F_SIZEQ (1 << 6) 1248 /* Floating-point instruction has the field of 'type'. */ 1249 #define F_FPTYPE (1 << 7) 1250 /* AdvSIMD scalar instruction has the field of 'size'. */ 1251 #define F_SSIZE (1 << 8) 1252 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q". */ 1253 #define F_T (1 << 9) 1254 /* Size of GPR operand in AdvSIMD instructions encoded in Q. */ 1255 #define F_GPRSIZE_IN_Q (1 << 10) 1256 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22. */ 1257 #define F_LDS_SIZE (1 << 11) 1258 /* Optional operand; assume maximum of 1 operand can be optional. */ 1259 #define F_OPD0_OPT (1 << 12) 1260 #define F_OPD1_OPT (2 << 12) 1261 #define F_OPD2_OPT (3 << 12) 1262 #define F_OPD3_OPT (4 << 12) 1263 #define F_OPD4_OPT (5 << 12) 1264 /* Default value for the optional operand when omitted from the assembly. */ 1265 #define F_DEFAULT(X) (((X) & 0x1f) << 15) 1266 /* Instruction that is an alias of another instruction needs to be 1267 encoded/decoded by converting it to/from the real form, followed by 1268 the encoding/decoding according to the rules of the real opcode. 1269 This compares to the direct coding using the alias's information. 1270 N.B. this flag requires F_ALIAS to be used together. */ 1271 #define F_CONV (1 << 20) 1272 /* Use together with F_ALIAS to indicate an alias opcode is a programmer 1273 friendly pseudo instruction available only in the assembly code (thus will 1274 not show up in the disassembly). */ 1275 #define F_PSEUDO (1 << 21) 1276 /* Instruction has miscellaneous encoding/decoding rules. */ 1277 #define F_MISC (1 << 22) 1278 /* Instruction has the field of 'N'; used in conjunction with F_SF. */ 1279 #define F_N (1 << 23) 1280 /* Opcode dependent field. */ 1281 #define F_OD(X) (((X) & 0x7) << 24) 1282 /* Instruction has the field of 'sz'. */ 1283 #define F_LSE_SZ (1 << 27) 1284 /* Require an exact qualifier match, even for NIL qualifiers. */ 1285 #define F_STRICT (1ULL << 28) 1286 /* This system instruction is used to read system registers. */ 1287 #define F_SYS_READ (1ULL << 29) 1288 /* This system instruction is used to write system registers. */ 1289 #define F_SYS_WRITE (1ULL << 30) 1290 /* This instruction has an extra constraint on it that imposes a requirement on 1291 subsequent instructions. */ 1292 #define F_SCAN (1ULL << 31) 1293 /* Instruction takes a pair of optional operands. If we specify the Nth operand 1294 to be optional, then we also implicitly specify (N+1)th operand to also be 1295 optional. */ 1296 #define F_OPD_PAIR_OPT (1ULL << 32) 1297 /* This instruction does not allow the full range of values that the 1298 width of fields in the assembler instruction would theoretically 1299 allow. This impacts the constraintts on assembly but yelds no 1300 impact on disassembly. */ 1301 #define F_OPD_NARROW (1ULL << 33) 1302 /* For the instruction with size[22:23] field. */ 1303 #define F_OPD_SIZE (1ULL << 34) 1304 /* RCPC3 instruction has the field of 'size'. */ 1305 #define F_RCPC3_SIZE (1ULL << 35) 1306 /* Next bit is 36. */ 1307 1308 /* Instruction constraints. */ 1309 /* This instruction has a predication constraint on the instruction at PC+4. */ 1310 #define C_SCAN_MOVPRFX (1U << 0) 1311 /* This instruction's operation width is determined by the operand with the 1312 largest element size. */ 1313 #define C_MAX_ELEM (1U << 1) 1314 #define C_SCAN_MOPS_P (1U << 2) 1315 #define C_SCAN_MOPS_M (2U << 2) 1316 #define C_SCAN_MOPS_E (3U << 2) 1317 #define C_SCAN_MOPS_PME (3U << 2) 1318 /* Next bit is 4. */ 1319 1320 static inline bool 1321 alias_opcode_p (const aarch64_opcode *opcode) 1322 { 1323 return (opcode->flags & F_ALIAS) != 0; 1324 } 1325 1326 static inline bool 1327 opcode_has_alias (const aarch64_opcode *opcode) 1328 { 1329 return (opcode->flags & F_HAS_ALIAS) != 0; 1330 } 1331 1332 /* Priority for disassembling preference. */ 1333 static inline int 1334 opcode_priority (const aarch64_opcode *opcode) 1335 { 1336 return (opcode->flags >> 2) & 0x3; 1337 } 1338 1339 static inline bool 1340 pseudo_opcode_p (const aarch64_opcode *opcode) 1341 { 1342 return (opcode->flags & F_PSEUDO) != 0lu; 1343 } 1344 1345 /* Deal with two possible scenarios: If F_OP_PAIR_OPT not set, as is the case 1346 by default, F_OPDn_OPT must equal IDX + 1, else F_OPDn_OPT must be in range 1347 [IDX, IDX + 1]. */ 1348 static inline bool 1349 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx) 1350 { 1351 if (opcode->flags & F_OPD_PAIR_OPT) 1352 return (((opcode->flags >> 12) & 0x7) == idx 1353 || ((opcode->flags >> 12) & 0x7) == idx + 1); 1354 return ((opcode->flags >> 12) & 0x7) == idx + 1; 1355 } 1356 1357 static inline aarch64_insn 1358 get_optional_operand_default_value (const aarch64_opcode *opcode) 1359 { 1360 return (opcode->flags >> 15) & 0x1f; 1361 } 1362 1363 static inline unsigned int 1364 get_opcode_dependent_value (const aarch64_opcode *opcode) 1365 { 1366 return (opcode->flags >> 24) & 0x7; 1367 } 1368 1369 static inline bool 1370 opcode_has_special_coder (const aarch64_opcode *opcode) 1371 { 1372 return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T 1373 | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND 1374 | F_OPD_SIZE | F_RCPC3_SIZE)) != 0; 1375 } 1376 1377 struct aarch64_name_value_pair 1378 { 1379 const char * name; 1380 aarch64_insn value; 1381 }; 1382 1383 extern const struct aarch64_name_value_pair aarch64_operand_modifiers []; 1384 extern const struct aarch64_name_value_pair aarch64_barrier_options [16]; 1385 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4]; 1386 extern const struct aarch64_name_value_pair aarch64_prfops [32]; 1387 extern const struct aarch64_name_value_pair aarch64_hint_options []; 1388 1389 #define AARCH64_MAX_SYSREG_NAME_LEN 32 1390 1391 typedef struct 1392 { 1393 const char * name; 1394 aarch64_insn value; 1395 uint32_t flags; 1396 1397 /* A set of features, all of which are required for this system register to be 1398 available. */ 1399 aarch64_feature_set features; 1400 } aarch64_sys_reg; 1401 1402 extern const aarch64_sys_reg aarch64_sys_regs []; 1403 extern const aarch64_sys_reg aarch64_pstatefields []; 1404 extern bool aarch64_sys_reg_deprecated_p (const uint32_t); 1405 extern bool aarch64_sys_reg_128bit_p (const uint32_t); 1406 extern bool aarch64_sys_reg_alias_p (const uint32_t); 1407 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set, 1408 const aarch64_sys_reg *); 1409 1410 typedef struct 1411 { 1412 const char *name; 1413 uint32_t value; 1414 uint32_t flags ; 1415 1416 /* A set of features, all of which are required for this system instruction to be 1417 available. */ 1418 aarch64_feature_set features; 1419 } aarch64_sys_ins_reg; 1420 1421 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *); 1422 extern bool 1423 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set, 1424 const char *reg_name, 1425 uint32_t, const aarch64_feature_set *); 1426 1427 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic []; 1428 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc []; 1429 extern const aarch64_sys_ins_reg aarch64_sys_regs_at []; 1430 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi []; 1431 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr []; 1432 1433 /* Shift/extending operator kinds. 1434 N.B. order is important; keep aarch64_operand_modifiers synced. */ 1435 enum aarch64_modifier_kind 1436 { 1437 AARCH64_MOD_NONE, 1438 AARCH64_MOD_MSL, 1439 AARCH64_MOD_ROR, 1440 AARCH64_MOD_ASR, 1441 AARCH64_MOD_LSR, 1442 AARCH64_MOD_LSL, 1443 AARCH64_MOD_UXTB, 1444 AARCH64_MOD_UXTH, 1445 AARCH64_MOD_UXTW, 1446 AARCH64_MOD_UXTX, 1447 AARCH64_MOD_SXTB, 1448 AARCH64_MOD_SXTH, 1449 AARCH64_MOD_SXTW, 1450 AARCH64_MOD_SXTX, 1451 AARCH64_MOD_MUL, 1452 AARCH64_MOD_MUL_VL, 1453 }; 1454 1455 bool 1456 aarch64_extend_operator_p (enum aarch64_modifier_kind); 1457 1458 enum aarch64_modifier_kind 1459 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *); 1460 /* Condition. */ 1461 1462 typedef struct 1463 { 1464 /* A list of names with the first one as the disassembly preference; 1465 terminated by NULL if fewer than 3. */ 1466 const char *names[4]; 1467 aarch64_insn value; 1468 } aarch64_cond; 1469 1470 extern const aarch64_cond aarch64_conds[16]; 1471 1472 const aarch64_cond* get_cond_from_value (aarch64_insn value); 1473 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond); 1474 1475 /* Information about a reference to part of ZA. */ 1476 struct aarch64_indexed_za 1477 { 1478 /* Which tile is being accessed. Unused (and 0) for an index into ZA. */ 1479 int regno; 1480 1481 struct 1482 { 1483 /* The 32-bit index register. */ 1484 int regno; 1485 1486 /* The first (or only) immediate offset. */ 1487 int64_t imm; 1488 1489 /* The last immediate offset minus the first immediate offset. 1490 Unlike the range size, this is guaranteed not to overflow 1491 when the end offset > the start offset. */ 1492 uint64_t countm1; 1493 } index; 1494 1495 /* The vector group size, or 0 if none. */ 1496 unsigned group_size : 8; 1497 1498 /* True if a tile access is vertical, false if it is horizontal. 1499 Unused (and 0) for an index into ZA. */ 1500 unsigned v : 1; 1501 }; 1502 1503 /* Information about a list of registers. */ 1504 struct aarch64_reglist 1505 { 1506 unsigned first_regno : 8; 1507 unsigned num_regs : 8; 1508 /* The difference between the nth and the n+1th register. */ 1509 unsigned stride : 8; 1510 /* 1 if it is a list of reg element. */ 1511 unsigned has_index : 1; 1512 /* Lane index; valid only when has_index is 1. */ 1513 int64_t index; 1514 }; 1515 1516 /* Structure representing an operand. */ 1517 1518 struct aarch64_opnd_info 1519 { 1520 enum aarch64_opnd type; 1521 aarch64_opnd_qualifier_t qualifier; 1522 int idx; 1523 1524 union 1525 { 1526 struct 1527 { 1528 unsigned regno; 1529 } reg; 1530 struct 1531 { 1532 unsigned int regno; 1533 int64_t index; 1534 } reglane; 1535 /* e.g. LVn. */ 1536 struct aarch64_reglist reglist; 1537 /* e.g. immediate or pc relative address offset. */ 1538 struct 1539 { 1540 int64_t value; 1541 unsigned is_fp : 1; 1542 } imm; 1543 /* e.g. address in STR (register offset). */ 1544 struct 1545 { 1546 unsigned base_regno; 1547 struct 1548 { 1549 union 1550 { 1551 int imm; 1552 unsigned regno; 1553 }; 1554 unsigned is_reg; 1555 } offset; 1556 unsigned pcrel : 1; /* PC-relative. */ 1557 unsigned writeback : 1; 1558 unsigned preind : 1; /* Pre-indexed. */ 1559 unsigned postind : 1; /* Post-indexed. */ 1560 } addr; 1561 1562 struct 1563 { 1564 /* The encoding of the system register. */ 1565 aarch64_insn value; 1566 1567 /* The system register flags. */ 1568 uint32_t flags; 1569 } sysreg; 1570 1571 /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */ 1572 struct aarch64_indexed_za indexed_za; 1573 1574 const aarch64_cond *cond; 1575 /* The encoding of the PSTATE field. */ 1576 aarch64_insn pstatefield; 1577 const aarch64_sys_ins_reg *sysins_op; 1578 const struct aarch64_name_value_pair *barrier; 1579 const struct aarch64_name_value_pair *hint_option; 1580 const struct aarch64_name_value_pair *prfop; 1581 }; 1582 1583 /* Operand shifter; in use when the operand is a register offset address, 1584 add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}. */ 1585 struct 1586 { 1587 enum aarch64_modifier_kind kind; 1588 unsigned operator_present: 1; /* Only valid during encoding. */ 1589 /* Value of the 'S' field in ld/st reg offset; used only in decoding. */ 1590 unsigned amount_present: 1; 1591 int64_t amount; 1592 } shifter; 1593 1594 unsigned skip:1; /* Operand is not completed if there is a fixup needed 1595 to be done on it. In some (but not all) of these 1596 cases, we need to tell libopcodes to skip the 1597 constraint checking and the encoding for this 1598 operand, so that the libopcodes can pick up the 1599 right opcode before the operand is fixed-up. This 1600 flag should only be used during the 1601 assembling/encoding. */ 1602 unsigned present:1; /* Whether this operand is present in the assembly 1603 line; not used during the disassembly. */ 1604 }; 1605 1606 typedef struct aarch64_opnd_info aarch64_opnd_info; 1607 1608 /* Structure representing an instruction. 1609 1610 It is used during both the assembling and disassembling. The assembler 1611 fills an aarch64_inst after a successful parsing and then passes it to the 1612 encoding routine to do the encoding. During the disassembling, the 1613 disassembler calls the decoding routine to decode a binary instruction; on a 1614 successful return, such a structure will be filled with information of the 1615 instruction; then the disassembler uses the information to print out the 1616 instruction. */ 1617 1618 struct aarch64_inst 1619 { 1620 /* The value of the binary instruction. */ 1621 aarch64_insn value; 1622 1623 /* Corresponding opcode entry. */ 1624 const aarch64_opcode *opcode; 1625 1626 /* Condition for a truly conditional-executed instrutions, e.g. b.cond. */ 1627 const aarch64_cond *cond; 1628 1629 /* Operands information. */ 1630 aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM]; 1631 }; 1632 1633 /* Defining the HINT #imm values for the aarch64_hint_options. */ 1634 #define HINT_OPD_CSYNC 0x11 1635 #define HINT_OPD_DSYNC 0x13 1636 #define HINT_OPD_C 0x22 1637 #define HINT_OPD_J 0x24 1638 #define HINT_OPD_JC 0x26 1639 #define HINT_OPD_NULL 0x00 1640 1641 1642 /* Diagnosis related declaration and interface. */ 1643 1644 /* Operand error kind enumerators. 1645 1646 AARCH64_OPDE_RECOVERABLE 1647 Less severe error found during the parsing, very possibly because that 1648 GAS has picked up a wrong instruction template for the parsing. 1649 1650 AARCH64_OPDE_A_SHOULD_FOLLOW_B 1651 The instruction forms (or is expected to form) part of a sequence, 1652 but the preceding instruction in the sequence wasn't the expected one. 1653 The message refers to two strings: the name of the current instruction, 1654 followed by the name of the expected preceding instruction. 1655 1656 AARCH64_OPDE_EXPECTED_A_AFTER_B 1657 Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus 1658 so that the current instruction is assumed to be the incorrect one: 1659 "since the previous instruction was B, the current one should be A". 1660 1661 AARCH64_OPDE_SYNTAX_ERROR 1662 General syntax error; it can be either a user error, or simply because 1663 that GAS is trying a wrong instruction template. 1664 1665 AARCH64_OPDE_FATAL_SYNTAX_ERROR 1666 Definitely a user syntax error. 1667 1668 AARCH64_OPDE_INVALID_VARIANT 1669 No syntax error, but the operands are not a valid combination, e.g. 1670 FMOV D0,S0 1671 1672 The following errors are only reported against an asm string that is 1673 syntactically valid and that has valid operand qualifiers. 1674 1675 AARCH64_OPDE_INVALID_VG_SIZE 1676 Error about a "VGx<n>" modifier in a ZA index not having the 1677 correct <n>. This error effectively forms a pair with 1678 AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number 1679 of vectors that an instruction operates on. However, the "VGx<n>" 1680 modifier is optional, whereas a register list always has a known 1681 and explicit length. It therefore seems better to place more 1682 importance on the register list length when selecting an opcode table 1683 entry. This in turn means that having an incorrect register length 1684 should be more severe than having an incorrect "VGx<n>". 1685 1686 AARCH64_OPDE_REG_LIST_LENGTH 1687 Error about a register list operand having an unexpected number of 1688 registers. This error is low severity because there might be another 1689 opcode entry that supports the given number of registers. 1690 1691 AARCH64_OPDE_REG_LIST_STRIDE 1692 Error about a register list operand having the correct number 1693 (and type) of registers, but an unexpected stride. This error is 1694 more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies 1695 that the length is known to be correct. However, it is lower than 1696 many other errors, since some instructions have forms that share 1697 the same number of registers but have different strides. 1698 1699 AARCH64_OPDE_UNTIED_IMMS 1700 The asm failed to use the same immediate for a destination operand 1701 and a tied source operand. 1702 1703 AARCH64_OPDE_UNTIED_OPERAND 1704 The asm failed to use the same register for a destination operand 1705 and a tied source operand. 1706 1707 AARCH64_OPDE_OUT_OF_RANGE 1708 Error about some immediate value out of a valid range. 1709 1710 AARCH64_OPDE_UNALIGNED 1711 Error about some immediate value not properly aligned (i.e. not being a 1712 multiple times of a certain value). 1713 1714 AARCH64_OPDE_OTHER_ERROR 1715 Error of the highest severity and used for any severe issue that does not 1716 fall into any of the above categories. 1717 1718 AARCH64_OPDE_INVALID_REGNO 1719 A register was syntactically valid and had the right type, but it was 1720 outside the range supported by the associated operand field. This is 1721 a high severity error because there are currently no instructions that 1722 would accept the operands that precede the erroneous one (if any) and 1723 yet still accept a wider range of registers. 1724 1725 AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and 1726 AARCH64_OPDE_FATAL_SYNTAX_ERROR are only deteced by GAS while the 1727 AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as 1728 only libopcodes has the information about the valid variants of each 1729 instruction. 1730 1731 The enumerators have an increasing severity. This is helpful when there are 1732 multiple instruction templates available for a given mnemonic name (e.g. 1733 FMOV); this mechanism will help choose the most suitable template from which 1734 the generated diagnostics can most closely describe the issues, if any. 1735 1736 This enum needs to be kept up-to-date with operand_mismatch_kind_names 1737 in tc-aarch64.c. */ 1738 1739 enum aarch64_operand_error_kind 1740 { 1741 AARCH64_OPDE_NIL, 1742 AARCH64_OPDE_RECOVERABLE, 1743 AARCH64_OPDE_A_SHOULD_FOLLOW_B, 1744 AARCH64_OPDE_EXPECTED_A_AFTER_B, 1745 AARCH64_OPDE_SYNTAX_ERROR, 1746 AARCH64_OPDE_FATAL_SYNTAX_ERROR, 1747 AARCH64_OPDE_INVALID_VARIANT, 1748 AARCH64_OPDE_INVALID_VG_SIZE, 1749 AARCH64_OPDE_REG_LIST_LENGTH, 1750 AARCH64_OPDE_REG_LIST_STRIDE, 1751 AARCH64_OPDE_UNTIED_IMMS, 1752 AARCH64_OPDE_UNTIED_OPERAND, 1753 AARCH64_OPDE_OUT_OF_RANGE, 1754 AARCH64_OPDE_UNALIGNED, 1755 AARCH64_OPDE_OTHER_ERROR, 1756 AARCH64_OPDE_INVALID_REGNO 1757 }; 1758 1759 /* N.B. GAS assumes that this structure work well with shallow copy. */ 1760 struct aarch64_operand_error 1761 { 1762 enum aarch64_operand_error_kind kind; 1763 int index; 1764 const char *error; 1765 /* Some data for extra information. */ 1766 union { 1767 int i; 1768 const char *s; 1769 } data[3]; 1770 bool non_fatal; 1771 }; 1772 1773 /* AArch64 sequence structure used to track instructions with F_SCAN 1774 dependencies for both assembler and disassembler. */ 1775 struct aarch64_instr_sequence 1776 { 1777 /* The instructions in the sequence, starting with the one that 1778 caused it to be opened. */ 1779 aarch64_inst *instr; 1780 /* The number of instructions already in the sequence. */ 1781 int num_added_insns; 1782 /* The number of instructions allocated to the sequence. */ 1783 int num_allocated_insns; 1784 }; 1785 1786 /* Encoding entrypoint. */ 1787 1788 extern bool 1789 aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *, 1790 aarch64_insn *, aarch64_opnd_qualifier_t *, 1791 aarch64_operand_error *, aarch64_instr_sequence *); 1792 1793 extern const aarch64_opcode * 1794 aarch64_replace_opcode (struct aarch64_inst *, 1795 const aarch64_opcode *); 1796 1797 /* Given the opcode enumerator OP, return the pointer to the corresponding 1798 opcode entry. */ 1799 1800 extern const aarch64_opcode * 1801 aarch64_get_opcode (enum aarch64_op); 1802 1803 /* An instance of this structure is passed to aarch64_print_operand, and 1804 the callback within this structure is used to apply styling to the 1805 disassembler output. This structure encapsulates the callback and a 1806 state pointer. */ 1807 1808 struct aarch64_styler 1809 { 1810 /* The callback used to apply styling. Returns a string created from FMT 1811 and ARGS with STYLE applied to the string. STYLER is a pointer back 1812 to this object so that the callback can access the state member. 1813 1814 The string returned from this callback must remain valid until the 1815 call to aarch64_print_operand has completed. */ 1816 const char *(*apply_style) (struct aarch64_styler *styler, 1817 enum disassembler_style style, 1818 const char *fmt, 1819 va_list args); 1820 1821 /* A pointer to a state object which can be used by the apply_style 1822 callback function. */ 1823 void *state; 1824 }; 1825 1826 /* Generate the string representation of an operand. */ 1827 extern void 1828 aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *, 1829 const aarch64_opnd_info *, int, int *, bfd_vma *, 1830 char **, char *, size_t, 1831 aarch64_feature_set features, 1832 struct aarch64_styler *styler); 1833 1834 /* Miscellaneous interface. */ 1835 1836 extern int 1837 aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd); 1838 1839 extern aarch64_opnd_qualifier_t 1840 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int, 1841 const aarch64_opnd_qualifier_t, int); 1842 1843 extern bool 1844 aarch64_is_destructive_by_operands (const aarch64_opcode *); 1845 1846 extern int 1847 aarch64_num_of_operands (const aarch64_opcode *); 1848 1849 extern int 1850 aarch64_stack_pointer_p (const aarch64_opnd_info *); 1851 1852 extern int 1853 aarch64_zero_register_p (const aarch64_opnd_info *); 1854 1855 extern enum err_type 1856 aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool, 1857 aarch64_operand_error *); 1858 1859 extern void 1860 init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *); 1861 1862 /* Given an operand qualifier, return the expected data element size 1863 of a qualified operand. */ 1864 extern unsigned char 1865 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t); 1866 1867 extern enum aarch64_operand_class 1868 aarch64_get_operand_class (enum aarch64_opnd); 1869 1870 extern const char * 1871 aarch64_get_operand_name (enum aarch64_opnd); 1872 1873 extern const char * 1874 aarch64_get_operand_desc (enum aarch64_opnd); 1875 1876 extern bool 1877 aarch64_sve_dupm_mov_immediate_p (uint64_t, int); 1878 1879 extern bool 1880 aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *); 1881 1882 extern int 1883 calc_ldst_datasize (const aarch64_opnd_info *opnds); 1884 1885 #ifdef DEBUG_AARCH64 1886 extern int debug_dump; 1887 1888 extern void 1889 aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2))); 1890 1891 #define DEBUG_TRACE(M, ...) \ 1892 { \ 1893 if (debug_dump) \ 1894 aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \ 1895 } 1896 1897 #define DEBUG_TRACE_IF(C, M, ...) \ 1898 { \ 1899 if (debug_dump && (C)) \ 1900 aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \ 1901 } 1902 #else /* !DEBUG_AARCH64 */ 1903 #define DEBUG_TRACE(M, ...) ; 1904 #define DEBUG_TRACE_IF(C, M, ...) ; 1905 #endif /* DEBUG_AARCH64 */ 1906 1907 extern const char *const aarch64_sve_pattern_array[32]; 1908 extern const char *const aarch64_sve_prfop_array[16]; 1909 extern const char *const aarch64_rprfmop_array[64]; 1910 extern const char *const aarch64_sme_vlxn_array[2]; 1911 1912 #ifdef __cplusplus 1913 } 1914 #endif 1915 1916 #endif /* OPCODE_AARCH64_H */ 1917