1 /* aarch64-opc.c -- AArch64 opcode support.
2    Copyright (C) 2009-2019 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28 
29 #include "opintl.h"
30 #include "libiberty.h"
31 
32 #include "aarch64-opc.h"
33 
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37 
38 /* The enumeration strings associated with each value of a 5-bit SVE
39    pattern operand.  A null entry indicates a reserved meaning.  */
40 const char *const aarch64_sve_pattern_array[32] = {
41   /* 0-7.  */
42   "pow2",
43   "vl1",
44   "vl2",
45   "vl3",
46   "vl4",
47   "vl5",
48   "vl6",
49   "vl7",
50   /* 8-15.  */
51   "vl8",
52   "vl16",
53   "vl32",
54   "vl64",
55   "vl128",
56   "vl256",
57   0,
58   0,
59   /* 16-23.  */
60   0,
61   0,
62   0,
63   0,
64   0,
65   0,
66   0,
67   0,
68   /* 24-31.  */
69   0,
70   0,
71   0,
72   0,
73   0,
74   "mul4",
75   "mul3",
76   "all"
77 };
78 
79 /* The enumeration strings associated with each value of a 4-bit SVE
80    prefetch operand.  A null entry indicates a reserved meaning.  */
81 const char *const aarch64_sve_prfop_array[16] = {
82   /* 0-7.  */
83   "pldl1keep",
84   "pldl1strm",
85   "pldl2keep",
86   "pldl2strm",
87   "pldl3keep",
88   "pldl3strm",
89   0,
90   0,
91   /* 8-15.  */
92   "pstl1keep",
93   "pstl1strm",
94   "pstl2keep",
95   "pstl2strm",
96   "pstl3keep",
97   "pstl3strm",
98   0,
99   0
100 };
101 
102 /* Helper functions to determine which operand is used to encode/decode
103    the size:Q fields for AdvSIMD instructions.  */
104 
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108   return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 	  : FALSE);
111 }
112 
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116   return ((qualifier >= AARCH64_OPND_QLF_S_B
117 	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 	  : FALSE);
119 }
120 
121 enum data_pattern
122 {
123   DP_UNKNOWN,
124   DP_VECTOR_3SAME,
125   DP_VECTOR_LONG,
126   DP_VECTOR_WIDE,
127   DP_VECTOR_ACROSS_LANES,
128 };
129 
130 static const char significant_operand_index [] =
131 {
132   0,	/* DP_UNKNOWN, by default using operand 0.  */
133   0,	/* DP_VECTOR_3SAME */
134   1,	/* DP_VECTOR_LONG */
135   2,	/* DP_VECTOR_WIDE */
136   1,	/* DP_VECTOR_ACROSS_LANES */
137 };
138 
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140    the data pattern.
141    N.B. QUALIFIERS is one possible sequence of qualifiers, each of which
142    corresponds to one operand in a sequence of operands.  */
143 
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147   if (vector_qualifier_p (qualifiers[0]) == TRUE)
148     {
149       /* e.g. v.4s, v.4s, v.4s
150 	   or v.4h, v.4h, v.h[3].  */
151       if (qualifiers[0] == qualifiers[1]
152 	  && vector_qualifier_p (qualifiers[2]) == TRUE
153 	  && (aarch64_get_qualifier_esize (qualifiers[0])
154 	      == aarch64_get_qualifier_esize (qualifiers[1]))
155 	  && (aarch64_get_qualifier_esize (qualifiers[0])
156 	      == aarch64_get_qualifier_esize (qualifiers[2])))
157 	return DP_VECTOR_3SAME;
158       /* e.g. v.8h, v.8b, v.8b.
159            or v.4s, v.4h, v.h[2].
160 	   or v.8h, v.16b.  */
161       if (vector_qualifier_p (qualifiers[1]) == TRUE
162 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 	  && (aarch64_get_qualifier_esize (qualifiers[0])
164 	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 	return DP_VECTOR_LONG;
166       /* e.g. v.8h, v.8h, v.8b.  */
167       if (qualifiers[0] == qualifiers[1]
168 	  && vector_qualifier_p (qualifiers[2]) == TRUE
169 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 	  && (aarch64_get_qualifier_esize (qualifiers[0])
171 	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 	  && (aarch64_get_qualifier_esize (qualifiers[0])
173 	      == aarch64_get_qualifier_esize (qualifiers[1])))
174 	return DP_VECTOR_WIDE;
175     }
176   else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177     {
178       /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
179       if (vector_qualifier_p (qualifiers[1]) == TRUE
180 	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 	return DP_VECTOR_ACROSS_LANES;
182     }
183 
184   return DP_UNKNOWN;
185 }
186 
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188    the AdvSIMD instructions.  */
189 /* N.B. it is possible to do some optimization that avoids calling
190    get_data_pattern each time we need to select an operand.  We could
191    either cache the calculated result or statically generate the data;
192    however, it is not obvious that the optimization would bring a
193    significant benefit.  */
194 
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198   return
199     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
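
/* A worked example (for illustration only; the actual qualifier lists live
   in the opcode table): for a long operation such as
     UADDL <Vd>.8H, <Vn>.8B, <Vm>.8B
   the qualifier sequence is (V_8H, V_8B, V_8B), get_data_pattern returns
   DP_VECTOR_LONG, and significant_operand_index maps that to operand 1, so
   the first source operand drives the size:Q encoding.  */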
201 
202 const aarch64_field fields[] =
203 {
204     {  0,  0 },	/* NIL.  */
205     {  0,  4 },	/* cond2: condition in truly conditionally-executed inst.  */
206     {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
207     {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
208     { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
209     {  5, 19 },	/* imm19: e.g. in CBZ.  */
210     {  5, 19 },	/* immhi: e.g. in ADRP.  */
211     { 29,  2 },	/* immlo: e.g. in ADRP.  */
212     { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
213     { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
214     { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
215     { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
216     {  0,  5 },	/* Rt: in load/store instructions.  */
217     {  0,  5 },	/* Rd: in many integer instructions.  */
218     {  5,  5 },	/* Rn: in many integer instructions.  */
219     { 10,  5 },	/* Rt2: in load/store pair instructions.  */
220     { 10,  5 },	/* Ra: in fp instructions.  */
221     {  5,  3 },	/* op2: in the system instructions.  */
222     {  8,  4 },	/* CRm: in the system instructions.  */
223     { 12,  4 },	/* CRn: in the system instructions.  */
224     { 16,  3 },	/* op1: in the system instructions.  */
225     { 19,  2 },	/* op0: in the system instructions.  */
226     { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
227     { 12,  4 },	/* cond: condition flags as a source operand.  */
228     { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
229     { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
230     { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
231     { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
232     { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
233     { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
234     { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
235     { 12,  1 },	/* S: in load/store reg offset instructions.  */
236     { 21,  2 },	/* hw: in move wide constant instructions.  */
237     { 22,  2 },	/* opc: in load/store reg offset instructions.  */
238     { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
239     { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
240     { 22,  2 },	/* type: floating point type field in fp data inst.  */
241     { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
242     { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
243     { 15,  6 },	/* imm6_2: in rmif instructions.  */
244     { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
245     {  0,  4 },	/* imm4_2: in rmif instructions.  */
246     { 10,  4 },	/* imm4_3: in addg/subg instructions.  */
247     { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
248     { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
249     { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
250     { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
251     { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
252     {  5, 14 },	/* imm14: in test bit and branch instructions.  */
253     {  5, 16 },	/* imm16: in exception instructions.  */
254     {  0, 26 },	/* imm26: in unconditional branch instructions.  */
255     { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
256     { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
257     { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
258     { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
259     { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
260     { 22,  1 },	/* N: in logical (immediate) instructions.  */
261     { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
262     { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
263     { 31,  1 },	/* sf: in integer data processing instructions.  */
264     { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
265     { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
266     { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
267     { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
268     { 31,  1 },	/* b5: in the test bit and branch instructions.  */
269     { 19,  5 },	/* b40: in the test bit and branch instructions.  */
270     { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
271     {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
272     { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
273     { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
274     { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
275     {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
276     { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
277     {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
278     { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
279     { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
280     { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
281     {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
282     {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
283     {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
284     { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
285     {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
286     {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
287     {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
288     {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
289     { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
290     {  0,  5 }, /* SVE_Zd: SVE vector register, bits [4,0].  */
291     {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
292     { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293     {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
294     {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
295     {  5,  1 }, /* SVE_i1: single-bit immediate.  */
296     { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
297     { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
298     { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
299     {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
300     { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
301     { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
302     { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
303     {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
304     {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
305     { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
306     {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
307     { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
308     {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
309     {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
310     { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
311     { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
312     { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
313     { 16,  4 }, /* SVE_tsz: triangular size select.  */
314     { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
315     {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
316     { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
317     { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
318     { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
319     { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
320     { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
321     { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
322     { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
323     { 22,  1 }, /* sz: 1-bit element size select.  */
324 };
325 
326 enum aarch64_operand_class
327 aarch64_get_operand_class (enum aarch64_opnd type)
328 {
329   return aarch64_operands[type].op_class;
330 }
331 
332 const char *
333 aarch64_get_operand_name (enum aarch64_opnd type)
334 {
335   return aarch64_operands[type].name;
336 }
337 
338 /* Get operand description string.
339    This is usually for diagnostic purposes.  */
340 const char *
341 aarch64_get_operand_desc (enum aarch64_opnd type)
342 {
343   return aarch64_operands[type].desc;
344 }
345 
346 /* Table of all conditional affixes.  */
347 const aarch64_cond aarch64_conds[16] =
348 {
349   {{"eq", "none"}, 0x0},
350   {{"ne", "any"}, 0x1},
351   {{"cs", "hs", "nlast"}, 0x2},
352   {{"cc", "lo", "ul", "last"}, 0x3},
353   {{"mi", "first"}, 0x4},
354   {{"pl", "nfrst"}, 0x5},
355   {{"vs"}, 0x6},
356   {{"vc"}, 0x7},
357   {{"hi", "pmore"}, 0x8},
358   {{"ls", "plast"}, 0x9},
359   {{"ge", "tcont"}, 0xa},
360   {{"lt", "tstop"}, 0xb},
361   {{"gt"}, 0xc},
362   {{"le"}, 0xd},
363   {{"al"}, 0xe},
364   {{"nv"}, 0xf},
365 };
366 
367 const aarch64_cond *
368 get_cond_from_value (aarch64_insn value)
369 {
370   assert (value < 16);
371   return &aarch64_conds[(unsigned int) value];
372 }
373 
374 const aarch64_cond *
375 get_inverted_cond (const aarch64_cond *cond)
376 {
377   return &aarch64_conds[cond->value ^ 0x1];
378 }
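
/* For example, since the low bit of the condition value selects between a
   condition and its inverse, get_inverted_cond maps "eq" (0x0) to "ne" (0x1)
   and "lt" (0xb) to "ge" (0xa).  */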
379 
380 /* Table describing the operand extension/shifting operators; indexed by
381    enum aarch64_modifier_kind.
382 
383    The value column provides the most common values for encoding modifiers,
384    which enables table-driven encoding/decoding for the modifiers.  */
385 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
386 {
387     {"none", 0x0},
388     {"msl",  0x0},
389     {"ror",  0x3},
390     {"asr",  0x2},
391     {"lsr",  0x1},
392     {"lsl",  0x0},
393     {"uxtb", 0x0},
394     {"uxth", 0x1},
395     {"uxtw", 0x2},
396     {"uxtx", 0x3},
397     {"sxtb", 0x4},
398     {"sxth", 0x5},
399     {"sxtw", 0x6},
400     {"sxtx", 0x7},
401     {"mul", 0x0},
402     {"mul vl", 0x0},
403     {NULL, 0},
404 };
405 
406 enum aarch64_modifier_kind
407 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
408 {
409   return desc - aarch64_operand_modifiers;
410 }
411 
412 aarch64_insn
413 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
414 {
415   return aarch64_operand_modifiers[kind].value;
416 }
417 
418 enum aarch64_modifier_kind
419 aarch64_get_operand_modifier_from_value (aarch64_insn value,
420 					 bfd_boolean extend_p)
421 {
422   if (extend_p == TRUE)
423     return AARCH64_MOD_UXTB + value;
424   else
425     return AARCH64_MOD_LSL - value;
426 }
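
/* For example (values taken from aarch64_operand_modifiers above, and
   assuming the modifier enumeration follows the table order, as
   aarch64_get_operand_modifier requires):
     aarch64_get_operand_modifier_from_value (6, TRUE)  -> AARCH64_MOD_SXTW (0x6)
     aarch64_get_operand_modifier_from_value (2, FALSE) -> AARCH64_MOD_ASR  (0x2)  */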
427 
428 bfd_boolean
429 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
430 {
431   return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
432     ? TRUE : FALSE;
433 }
434 
435 static inline bfd_boolean
436 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
437 {
438   return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
439     ? TRUE : FALSE;
440 }
441 
442 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
443 {
444     { "#0x00", 0x0 },
445     { "oshld", 0x1 },
446     { "oshst", 0x2 },
447     { "osh",   0x3 },
448     { "#0x04", 0x4 },
449     { "nshld", 0x5 },
450     { "nshst", 0x6 },
451     { "nsh",   0x7 },
452     { "#0x08", 0x8 },
453     { "ishld", 0x9 },
454     { "ishst", 0xa },
455     { "ish",   0xb },
456     { "#0x0c", 0xc },
457     { "ld",    0xd },
458     { "st",    0xe },
459     { "sy",    0xf },
460 };
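
/* For example, "DMB ISH" uses the value 0xb from this table as its CRm
   field, and "DSB SY" uses 0xf.  */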
461 
462 /* Table describing the operands supported by the aliases of the HINT
463    instruction.
464 
465    The name column is the operand that is accepted for the alias.  The value
466    column is the hint number of the alias.  The list of operands is terminated
467    by NULL in the name column.  */
468 
469 const struct aarch64_name_value_pair aarch64_hint_options[] =
470 {
471   /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
472   { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
473   { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
474   { "c",	HINT_OPD_C },		/* BTI C.  */
475   { "j",	HINT_OPD_J },		/* BTI J.  */
476   { "jc",	HINT_OPD_JC },		/* BTI JC.  */
477   { NULL,	HINT_OPD_NULL },
478 };
479 
480 /* op -> op:       load = 0 instruction = 1 store = 2
481    l  -> level:    1-3
482    t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
483 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
484 const struct aarch64_name_value_pair aarch64_prfops[32] =
485 {
486   { "pldl1keep", B(0, 1, 0) },
487   { "pldl1strm", B(0, 1, 1) },
488   { "pldl2keep", B(0, 2, 0) },
489   { "pldl2strm", B(0, 2, 1) },
490   { "pldl3keep", B(0, 3, 0) },
491   { "pldl3strm", B(0, 3, 1) },
492   { NULL, 0x06 },
493   { NULL, 0x07 },
494   { "plil1keep", B(1, 1, 0) },
495   { "plil1strm", B(1, 1, 1) },
496   { "plil2keep", B(1, 2, 0) },
497   { "plil2strm", B(1, 2, 1) },
498   { "plil3keep", B(1, 3, 0) },
499   { "plil3strm", B(1, 3, 1) },
500   { NULL, 0x0e },
501   { NULL, 0x0f },
502   { "pstl1keep", B(2, 1, 0) },
503   { "pstl1strm", B(2, 1, 1) },
504   { "pstl2keep", B(2, 2, 0) },
505   { "pstl2strm", B(2, 2, 1) },
506   { "pstl3keep", B(2, 3, 0) },
507   { "pstl3strm", B(2, 3, 1) },
508   { NULL, 0x16 },
509   { NULL, 0x17 },
510   { NULL, 0x18 },
511   { NULL, 0x19 },
512   { NULL, 0x1a },
513   { NULL, 0x1b },
514   { NULL, 0x1c },
515   { NULL, 0x1d },
516   { NULL, 0x1e },
517   { NULL, 0x1f },
518 };
519 #undef B
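
/* Worked examples of the B() encoding above:
     B (0, 3, 1) = (0 << 3) | (2 << 1) | 1 = 0x05  ->  "pldl3strm"
     B (2, 1, 0) = (2 << 3) | (0 << 1) | 0 = 0x10  ->  "pstl1keep"
   matching the prfop immediate values used by PRFM.  */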
520 
521 /* Utilities for value constraints.  */
522 
523 static inline int
524 value_in_range_p (int64_t value, int low, int high)
525 {
526   return (value >= low && value <= high) ? 1 : 0;
527 }
528 
529 /* Return true if VALUE is a multiple of ALIGN.  */
530 static inline int
531 value_aligned_p (int64_t value, int align)
532 {
533   return (value % align) == 0;
534 }
535 
536 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits.  */
537 static inline int
538 value_fit_signed_field_p (int64_t value, unsigned width)
539 {
540   assert (width < 32);
541   if (width < sizeof (value) * 8)
542     {
543       int64_t lim = (int64_t)1 << (width - 1);
544       if (value >= -lim && value < lim)
545 	return 1;
546     }
547   return 0;
548 }
549 
550 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits.  */
551 static inline int
552 value_fit_unsigned_field_p (int64_t value, unsigned width)
553 {
554   assert (width < 32);
555   if (width < sizeof (value) * 8)
556     {
557       int64_t lim = (int64_t)1 << width;
558       if (value >= 0 && value < lim)
559 	return 1;
560     }
561   return 0;
562 }
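
/* For example, value_fit_signed_field_p (255, 8) is 0 (the signed range of
   an 8-bit field is [-128, 127]), while value_fit_unsigned_field_p (255, 8)
   is 1 (the unsigned range is [0, 255]).  */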
563 
564 /* Return 1 if OPERAND is SP or WSP.  */
565 int
566 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
567 {
568   return ((aarch64_get_operand_class (operand->type)
569 	   == AARCH64_OPND_CLASS_INT_REG)
570 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
571 	  && operand->reg.regno == 31);
572 }
573 
574 /* Return 1 if OPERAND is XZR or WZR.  */
575 int
576 aarch64_zero_register_p (const aarch64_opnd_info *operand)
577 {
578   return ((aarch64_get_operand_class (operand->type)
579 	   == AARCH64_OPND_CLASS_INT_REG)
580 	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
581 	  && operand->reg.regno == 31);
582 }
583 
584 /* Return true if the operand *OPERAND, which has the operand code
585    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
586    qualified by the qualifier TARGET.  */
587 
588 static inline int
589 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
590 			  aarch64_opnd_qualifier_t target)
591 {
592   switch (operand->qualifier)
593     {
594     case AARCH64_OPND_QLF_W:
595       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
596 	return 1;
597       break;
598     case AARCH64_OPND_QLF_X:
599       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
600 	return 1;
601       break;
602     case AARCH64_OPND_QLF_WSP:
603       if (target == AARCH64_OPND_QLF_W
604 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
605 	return 1;
606       break;
607     case AARCH64_OPND_QLF_SP:
608       if (target == AARCH64_OPND_QLF_X
609 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
610 	return 1;
611       break;
612     default:
613       break;
614     }
615 
616   return 0;
617 }
618 
619 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
620    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
621 
622    Return NIL if more than one expected qualifier is found.  */
623 
624 aarch64_opnd_qualifier_t
625 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
626 				int idx,
627 				const aarch64_opnd_qualifier_t known_qlf,
628 				int known_idx)
629 {
630   int i, saved_i;
631 
632   /* Special case.
633 
634      When the known qualifier is NIL, we have to assume that there is only
635      one qualifier sequence in the *QSEQ_LIST and return the corresponding
636      qualifier directly.  One scenario is that for instruction
637 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
638      which has only one possible valid qualifier sequence
639 	NIL, S_D
640      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
641      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
642 
643      Because the qualifier NIL has dual roles in the qualifier sequence:
644      it can mean no qualifier for the operand, or the qualifier sequence is
645      not in use (when all qualifiers in the sequence are NILs), we have to
646      handle this special case here.  */
647   if (known_qlf == AARCH64_OPND_NIL)
648     {
649       assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
650       return qseq_list[0][idx];
651     }
652 
653   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
654     {
655       if (qseq_list[i][known_idx] == known_qlf)
656 	{
657 	  if (saved_i != -1)
658 	    /* More than one sequence is found to have KNOWN_QLF at
659 	       KNOWN_IDX.  */
660 	    return AARCH64_OPND_NIL;
661 	  saved_i = i;
662 	}
663     }
664 
665   return qseq_list[saved_i][idx];
666 }
667 
668 enum operand_qualifier_kind
669 {
670   OQK_NIL,
671   OQK_OPD_VARIANT,
672   OQK_VALUE_IN_RANGE,
673   OQK_MISC,
674 };
675 
676 /* Operand qualifier description.  */
677 struct operand_qualifier_data
678 {
679   /* The usage of the three data fields depends on the qualifier kind.  */
680   int data0;
681   int data1;
682   int data2;
683   /* Description.  */
684   const char *desc;
685   /* Kind.  */
686   enum operand_qualifier_kind kind;
687 };
688 
689 /* Indexed by the operand qualifier enumerators.  */
690 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
691 {
692   {0, 0, 0, "NIL", OQK_NIL},
693 
694   /* Operand variant qualifiers.
695      First 3 fields:
696      element size, number of elements and common value for encoding.  */
697 
698   {4, 1, 0x0, "w", OQK_OPD_VARIANT},
699   {8, 1, 0x1, "x", OQK_OPD_VARIANT},
700   {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
701   {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
702 
703   {1, 1, 0x0, "b", OQK_OPD_VARIANT},
704   {2, 1, 0x1, "h", OQK_OPD_VARIANT},
705   {4, 1, 0x2, "s", OQK_OPD_VARIANT},
706   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
707   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
708   {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
709 
710   {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
711   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
712   {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
713   {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
714   {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
715   {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
716   {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
717   {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
718   {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
719   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
720   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
721 
722   {0, 0, 0, "z", OQK_OPD_VARIANT},
723   {0, 0, 0, "m", OQK_OPD_VARIANT},
724 
725   /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
726   {16, 0, 0, "tag", OQK_OPD_VARIANT},
727 
728   /* Qualifiers constraining the value range.
729      First 3 fields:
730      Lower bound, upper bound, unused.  */
731 
732   {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
733   {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
734   {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
735   {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
736   {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
737   {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
738   {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
739 
740   /* Qualifiers for miscellaneous purposes.
741      First 3 fields:
742      unused, unused and unused.  */
743 
744   {0, 0, 0, "lsl", 0},
745   {0, 0, 0, "msl", 0},
746 
747   {0, 0, 0, "retrieving", 0},
748 };
749 
750 static inline bfd_boolean
751 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
752 {
753   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
754     ? TRUE : FALSE;
755 }
756 
757 static inline bfd_boolean
758 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
759 {
760   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
761     ? TRUE : FALSE;
762 }
763 
764 const char*
765 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
766 {
767   return aarch64_opnd_qualifiers[qualifier].desc;
768 }
769 
770 /* Given an operand qualifier, return the expected data element size
771    of a qualified operand.  */
772 unsigned char
773 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
774 {
775   assert (operand_variant_qualifier_p (qualifier) == TRUE);
776   return aarch64_opnd_qualifiers[qualifier].data0;
777 }
778 
779 unsigned char
780 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
781 {
782   assert (operand_variant_qualifier_p (qualifier) == TRUE);
783   return aarch64_opnd_qualifiers[qualifier].data1;
784 }
785 
786 aarch64_insn
787 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
788 {
789   assert (operand_variant_qualifier_p (qualifier) == TRUE);
790   return aarch64_opnd_qualifiers[qualifier].data2;
791 }
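
/* For example, for AARCH64_OPND_QLF_V_4S the table entry above is
   {4, 4, 0x5, "4s"}, so aarch64_get_qualifier_esize returns 4 (bytes),
   aarch64_get_qualifier_nelem returns 4 and
   aarch64_get_qualifier_standard_value returns 0x5.  */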
792 
793 static int
794 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
795 {
796   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
797   return aarch64_opnd_qualifiers[qualifier].data0;
798 }
799 
800 static int
801 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
802 {
803   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
804   return aarch64_opnd_qualifiers[qualifier].data1;
805 }
806 
807 #ifdef DEBUG_AARCH64
808 void
809 aarch64_verbose (const char *str, ...)
810 {
811   va_list ap;
812   va_start (ap, str);
813   printf ("#### ");
814   vprintf (str, ap);
815   printf ("\n");
816   va_end (ap);
817 }
818 
819 static inline void
820 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
821 {
822   int i;
823   printf ("#### \t");
824   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
825     printf ("%s,", aarch64_get_qualifier_name (*qualifier));
826   printf ("\n");
827 }
828 
829 static void
830 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
831 		       const aarch64_opnd_qualifier_t *qualifier)
832 {
833   int i;
834   aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
835 
836   aarch64_verbose ("dump_match_qualifiers:");
837   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
838     curr[i] = opnd[i].qualifier;
839   dump_qualifier_sequence (curr);
840   aarch64_verbose ("against");
841   dump_qualifier_sequence (qualifier);
842 }
843 #endif /* DEBUG_AARCH64 */
844 
845 /* This function checks whether the instruction described by OPCODE is
846    destructive, based on the usage of its operands.  It does not recognize
847    unary destructive instructions.  */
848 bfd_boolean
849 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
850 {
851   int i = 0;
852   const enum aarch64_opnd *opnds = opcode->operands;
853 
854   if (opnds[0] == AARCH64_OPND_NIL)
855     return FALSE;
856 
857   while (opnds[++i] != AARCH64_OPND_NIL)
858     if (opnds[i] == opnds[0])
859       return TRUE;
860 
861   return FALSE;
862 }
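
/* For instance, an opcode whose operand list repeats its first operand code
   (the usual way a tied destination/source register such as <Zdn> is
   described in the opcode table) is reported as destructive by the check
   above.  */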
863 
864 /* TODO: improve this; we could add an extra field at run time to
865    store the number of operands rather than calculating it every time.  */
866 
867 int
868 aarch64_num_of_operands (const aarch64_opcode *opcode)
869 {
870   int i = 0;
871   const enum aarch64_opnd *opnds = opcode->operands;
872   while (opnds[i++] != AARCH64_OPND_NIL)
873     ;
874   --i;
875   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
876   return i;
877 }
878 
879 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
880    On success, fill the found sequence in *RET and return 1; otherwise return 0.
881 
882    N.B. on entry, it is very likely that only some operands in *INST
883    have had their qualifiers established.
884 
885    If STOP_AT is not -1, the function will only try to match
886    the qualifier sequence for operands before and including the operand
887    of index STOP_AT; and on success *RET will only be filled with the first
888    (STOP_AT+1) qualifiers.
889 
890    A couple of examples of the matching algorithm:
891 
892    X,W,NIL should match
893    X,W,NIL
894 
895    NIL,NIL should match
896    X  ,NIL
897 
898    Apart from serving the main encoding routine, this can also be called
899    during or after the operand decoding.  */
900 
901 int
902 aarch64_find_best_match (const aarch64_inst *inst,
903 			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
904 			 int stop_at, aarch64_opnd_qualifier_t *ret)
905 {
906   int found = 0;
907   int i, num_opnds;
908   const aarch64_opnd_qualifier_t *qualifiers;
909 
910   num_opnds = aarch64_num_of_operands (inst->opcode);
911   if (num_opnds == 0)
912     {
913       DEBUG_TRACE ("SUCCEED: no operand");
914       return 1;
915     }
916 
917   if (stop_at < 0 || stop_at >= num_opnds)
918     stop_at = num_opnds - 1;
919 
920   /* For each pattern.  */
921   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
922     {
923       int j;
924       qualifiers = *qualifiers_list;
925 
926       /* Start as positive.  */
927       found = 1;
928 
929       DEBUG_TRACE ("%d", i);
930 #ifdef DEBUG_AARCH64
931       if (debug_dump)
932 	dump_match_qualifiers (inst->operands, qualifiers);
933 #endif
934 
935       /* Most opcodes have far fewer patterns in the list.
936 	 The first NIL qualifier indicates the end of the list.  */
937       if (empty_qualifier_sequence_p (qualifiers) == TRUE)
938 	{
939 	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
940 	  if (i)
941 	    found = 0;
942 	  break;
943 	}
944 
945       for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
946 	{
947 	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
948 	    {
949 	      /* Either the operand does not have a qualifier, or the qualifier
950 		 for the operand needs to be deduced from the qualifier
951 		 sequence.
952 		 In the latter case, any constraint checking related with
953 		 the obtained qualifier should be done later in
954 		 operand_general_constraint_met_p.  */
955 	      continue;
956 	    }
957 	  else if (*qualifiers != inst->operands[j].qualifier)
958 	    {
959 	      /* Unless the target qualifier can also qualify the operand
960 		 (which already has a non-nil qualifier), non-equal
961 		 qualifiers are generally unmatched.  */
962 	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
963 		continue;
964 	      else
965 		{
966 		  found = 0;
967 		  break;
968 		}
969 	    }
970 	  else
971 	    continue;	/* Equal qualifiers are certainly matched.  */
972 	}
973 
974       /* Qualifiers established.  */
975       if (found == 1)
976 	break;
977     }
978 
979   if (found == 1)
980     {
981       /* Fill the result in *RET.  */
982       int j;
983       qualifiers = *qualifiers_list;
984 
985       DEBUG_TRACE ("complete qualifiers using list %d", i);
986 #ifdef DEBUG_AARCH64
987       if (debug_dump)
988 	dump_qualifier_sequence (qualifiers);
989 #endif
990 
991       for (j = 0; j <= stop_at; ++j, ++qualifiers)
992 	ret[j] = *qualifiers;
993       for (; j < AARCH64_MAX_OPND_NUM; ++j)
994 	ret[j] = AARCH64_OPND_QLF_NIL;
995 
996       DEBUG_TRACE ("SUCCESS");
997       return 1;
998     }
999 
1000   DEBUG_TRACE ("FAIL");
1001   return 0;
1002 }
1003 
1004 /* Operand qualifier matching and resolving.
1005 
1006    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1007    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1008 
1009    If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1010    succeeds.  */
1011 
1012 static int
1013 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1014 {
1015   int i, nops;
1016   aarch64_opnd_qualifier_seq_t qualifiers;
1017 
1018   if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1019 			       qualifiers))
1020     {
1021       DEBUG_TRACE ("matching FAIL");
1022       return 0;
1023     }
1024 
1025   if (inst->opcode->flags & F_STRICT)
1026     {
1027       /* Require an exact qualifier match, even for NIL qualifiers.  */
1028       nops = aarch64_num_of_operands (inst->opcode);
1029       for (i = 0; i < nops; ++i)
1030 	if (inst->operands[i].qualifier != qualifiers[i])
1031 	  return FALSE;
1032     }
1033 
1034   /* Update the qualifiers.  */
1035   if (update_p == TRUE)
1036     for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1037       {
1038 	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1039 	  break;
1040 	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1041 			"update %s with %s for operand %d",
1042 			aarch64_get_qualifier_name (inst->operands[i].qualifier),
1043 			aarch64_get_qualifier_name (qualifiers[i]), i);
1044 	inst->operands[i].qualifier = qualifiers[i];
1045       }
1046 
1047   DEBUG_TRACE ("matching SUCCESS");
1048   return 1;
1049 }
1050 
1051 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1052    register by MOVZ.
1053 
1054    IS32 indicates whether VALUE is a 32-bit immediate or not.
1055    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1056    amount will be returned in *SHIFT_AMOUNT.  */
1057 
1058 bfd_boolean
1059 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1060 {
1061   int amount;
1062 
1063   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1064 
1065   if (is32)
1066     {
1067       /* Allow all zeros or all ones in top 32-bits, so that
1068 	 32-bit constant expressions like ~0x80000000 are
1069 	 permitted.  */
1070       uint64_t ext = value;
1071       if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1072 	/* Immediate out of range.  */
1073 	return FALSE;
1074       value &= (int64_t) 0xffffffff;
1075     }
1076 
1077   /* First, try movz; then movn.  */
1078   amount = -1;
1079   if ((value & ((int64_t) 0xffff << 0)) == value)
1080     amount = 0;
1081   else if ((value & ((int64_t) 0xffff << 16)) == value)
1082     amount = 16;
1083   else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1084     amount = 32;
1085   else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1086     amount = 48;
1087 
1088   if (amount == -1)
1089     {
1090       DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1091       return FALSE;
1092     }
1093 
1094   if (shift_amount != NULL)
1095     *shift_amount = amount;
1096 
1097   DEBUG_TRACE ("exit TRUE with amount %d", amount);
1098 
1099   return TRUE;
1100 }
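
/* For example, aarch64_wide_constant_p (0x12340000, 1, &shift_amount)
   returns TRUE with *shift_amount == 16, corresponding to
   MOVZ <Wd>, #0x1234, LSL #16; a value such as 0x12345678 is rejected
   because it spans more than one 16-bit chunk.  */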
1101 
1102 /* Build the accepted values for immediate logical SIMD instructions.
1103 
1104    The standard encodings of the immediate value are:
1105      N      imms     immr         SIMD size  R             S
1106      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
1107      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
1108      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
1109      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
1110      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
1111      0      11110s   00000r       2       UInt(r)       UInt(s)
1112    where the all-ones value of S is reserved.
1113 
1114    Let's call E the SIMD size.
1115 
1116    The immediate value is a run of S+1 '1' bits, rotated to the right by R.
1117 
1118    The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1119    (remember S != E - 1).  */
1120 
1121 #define TOTAL_IMM_NB  5334
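
/* Spelling out the series above: 64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334; for each element size E there
   are E - 1 choices of S and E choices of R.  */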
1122 
1123 typedef struct
1124 {
1125   uint64_t imm;
1126   aarch64_insn encoding;
1127 } simd_imm_encoding;
1128 
1129 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1130 
1131 static int
1132 simd_imm_encoding_cmp(const void *i1, const void *i2)
1133 {
1134   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1135   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1136 
1137   if (imm1->imm < imm2->imm)
1138     return -1;
1139   if (imm1->imm > imm2->imm)
1140     return +1;
1141   return 0;
1142 }
1143 
1144 /* immediate bitfield standard encoding
1145    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
1146    1         ssssss     rrrrrr      64        rrrrrr ssssss
1147    0         0sssss     0rrrrr      32        rrrrr  sssss
1148    0         10ssss     00rrrr      16        rrrr   ssss
1149    0         110sss     000rrr      8         rrr    sss
1150    0         1110ss     0000rr      4         rr     ss
1151    0         11110s     00000r      2         r      s  */
1152 static inline int
1153 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1154 {
1155   return (is64 << 12) | (r << 6) | s;
1156 }
1157 
1158 static void
1159 build_immediate_table (void)
1160 {
1161   uint32_t log_e, e, s, r, s_mask;
1162   uint64_t mask, imm;
1163   int nb_imms;
1164   int is64;
1165 
1166   nb_imms = 0;
1167   for (log_e = 1; log_e <= 6; log_e++)
1168     {
1169       /* Get element size.  */
1170       e = 1u << log_e;
1171       if (log_e == 6)
1172 	{
1173 	  is64 = 1;
1174 	  mask = 0xffffffffffffffffull;
1175 	  s_mask = 0;
1176 	}
1177       else
1178 	{
1179 	  is64 = 0;
1180 	  mask = (1ull << e) - 1;
1181 	  /* log_e  s_mask
1182 	     1     ((1 << 4) - 1) << 2 = 111100
1183 	     2     ((1 << 3) - 1) << 3 = 111000
1184 	     3     ((1 << 2) - 1) << 4 = 110000
1185 	     4     ((1 << 1) - 1) << 5 = 100000
1186 	     5     ((1 << 0) - 1) << 6 = 000000  */
1187 	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1188 	}
1189       for (s = 0; s < e - 1; s++)
1190 	for (r = 0; r < e; r++)
1191 	  {
1192 	    /* s+1 consecutive bits to 1 (s < 63) */
1193 	    imm = (1ull << (s + 1)) - 1;
1194 	    /* rotate right by r */
1195 	    if (r != 0)
1196 	      imm = (imm >> r) | ((imm << (e - r)) & mask);
1197 	    /* replicate the constant depending on SIMD size */
1198 	    switch (log_e)
1199 	      {
1200 	      case 1: imm = (imm <<  2) | imm;
1201 		/* Fall through.  */
1202 	      case 2: imm = (imm <<  4) | imm;
1203 		/* Fall through.  */
1204 	      case 3: imm = (imm <<  8) | imm;
1205 		/* Fall through.  */
1206 	      case 4: imm = (imm << 16) | imm;
1207 		/* Fall through.  */
1208 	      case 5: imm = (imm << 32) | imm;
1209 		/* Fall through.  */
1210 	      case 6: break;
1211 	      default: abort ();
1212 	      }
1213 	    simd_immediates[nb_imms].imm = imm;
1214 	    simd_immediates[nb_imms].encoding =
1215 	      encode_immediate_bitfield(is64, s | s_mask, r);
1216 	    nb_imms++;
1217 	  }
1218     }
1219   assert (nb_imms == TOTAL_IMM_NB);
1220   qsort(simd_immediates, nb_imms,
1221 	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1222 }
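
/* A worked example of the encoding produced above: for element size e == 2,
   s == 0 and r == 0 the single set bit is replicated to
   0x5555555555555555, and encode_immediate_bitfield (0, 0x3c, 0) gives the
   standard encoding N == 0, immr == 0b000000, imms == 0b111100.  */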
1223 
1224 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1225    be accepted by logical (immediate) instructions
1226    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1227 
1228    ESIZE is the number of bytes in the decoded immediate value.
1229    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1230    VALUE will be returned in *ENCODING.  */
1231 
1232 bfd_boolean
1233 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1234 {
1235   simd_imm_encoding imm_enc;
1236   const simd_imm_encoding *imm_encoding;
1237   static bfd_boolean initialized = FALSE;
1238   uint64_t upper;
1239   int i;
1240 
1241   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1242 	       value, esize);
1243 
1244   if (!initialized)
1245     {
1246       build_immediate_table ();
1247       initialized = TRUE;
1248     }
1249 
1250   /* Allow all zeros or all ones in top bits, so that
1251      constant expressions like ~1 are permitted.  */
1252   upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
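  /* Note: the shift is split in two so that esize == 8 never shifts a
     64-bit value by 64 bits (undefined behaviour in C); UPPER then simply
     becomes 0.  */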
1253   if ((value & ~upper) != value && (value | upper) != value)
1254     return FALSE;
1255 
1256   /* Replicate to a full 64-bit value.  */
1257   value &= ~upper;
1258   for (i = esize * 8; i < 64; i *= 2)
1259     value |= (value << i);
1260 
1261   imm_enc.imm = value;
1262   imm_encoding = (const simd_imm_encoding *)
1263     bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1264             sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1265   if (imm_encoding == NULL)
1266     {
1267       DEBUG_TRACE ("exit with FALSE");
1268       return FALSE;
1269     }
1270   if (encoding != NULL)
1271     *encoding = imm_encoding->encoding;
1272   DEBUG_TRACE ("exit with TRUE");
1273   return TRUE;
1274 }
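
/* A minimal usage sketch (hypothetical caller code, for illustration):

     aarch64_insn enc;
     if (aarch64_logical_immediate_p (0xf0f0f0f0, 4, &enc))
       ...  ENC now holds the N:immr:imms encoding suitable for
	    e.g. ORR <Wd|WSP>, <Wn>, #0xf0f0f0f0 ...

   A value such as 0xf0f0f0f2 returns FALSE, since it is not a rotated run
   of contiguous ones replicated across the register.  */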
1275 
1276 /* If 64-bit immediate IMM is in the format of
1277    "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1278    where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1279    of value "abcdefgh".  Otherwise return -1.  */
1280 int
1281 aarch64_shrink_expanded_imm8 (uint64_t imm)
1282 {
1283   int i, ret;
1284   uint32_t byte;
1285 
1286   ret = 0;
1287   for (i = 0; i < 8; i++)
1288     {
1289       byte = (imm >> (8 * i)) & 0xff;
1290       if (byte == 0xff)
1291 	ret |= 1 << i;
1292       else if (byte != 0x00)
1293 	return -1;
1294     }
1295   return ret;
1296 }
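
/* For example, aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL) returns
   0x55: bytes 0, 2, 4 and 6 (counting from the least significant) are 0xff,
   so the corresponding result bits are set; 0x00ff00ff00ff00f0 is not in
   the expanded form and yields -1.  */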
1297 
1298 /* Utility inline functions for operand_general_constraint_met_p.  */
1299 
1300 static inline void
1301 set_error (aarch64_operand_error *mismatch_detail,
1302 	   enum aarch64_operand_error_kind kind, int idx,
1303 	   const char* error)
1304 {
1305   if (mismatch_detail == NULL)
1306     return;
1307   mismatch_detail->kind = kind;
1308   mismatch_detail->index = idx;
1309   mismatch_detail->error = error;
1310 }
1311 
1312 static inline void
1313 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1314 		  const char* error)
1315 {
1316   if (mismatch_detail == NULL)
1317     return;
1318   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1319 }
1320 
1321 static inline void
1322 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1323 			int idx, int lower_bound, int upper_bound,
1324 			const char* error)
1325 {
1326   if (mismatch_detail == NULL)
1327     return;
1328   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1329   mismatch_detail->data[0] = lower_bound;
1330   mismatch_detail->data[1] = upper_bound;
1331 }
1332 
1333 static inline void
1334 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1335 			    int idx, int lower_bound, int upper_bound)
1336 {
1337   if (mismatch_detail == NULL)
1338     return;
1339   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1340 			  _("immediate value"));
1341 }
1342 
1343 static inline void
1344 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1345 			       int idx, int lower_bound, int upper_bound)
1346 {
1347   if (mismatch_detail == NULL)
1348     return;
1349   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1350 			  _("immediate offset"));
1351 }
1352 
1353 static inline void
1354 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1355 			      int idx, int lower_bound, int upper_bound)
1356 {
1357   if (mismatch_detail == NULL)
1358     return;
1359   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1360 			  _("register number"));
1361 }
1362 
1363 static inline void
1364 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1365 				 int idx, int lower_bound, int upper_bound)
1366 {
1367   if (mismatch_detail == NULL)
1368     return;
1369   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1370 			  _("register element index"));
1371 }
1372 
1373 static inline void
1374 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1375 				   int idx, int lower_bound, int upper_bound)
1376 {
1377   if (mismatch_detail == NULL)
1378     return;
1379   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1380 			  _("shift amount"));
1381 }
1382 
1383 /* Report that the MUL modifier in operand IDX should be in the range
1384    [LOWER_BOUND, UPPER_BOUND].  */
1385 static inline void
1386 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1387 				   int idx, int lower_bound, int upper_bound)
1388 {
1389   if (mismatch_detail == NULL)
1390     return;
1391   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1392 			  _("multiplier"));
1393 }
1394 
1395 static inline void
1396 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1397 		     int alignment)
1398 {
1399   if (mismatch_detail == NULL)
1400     return;
1401   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1402   mismatch_detail->data[0] = alignment;
1403 }
1404 
1405 static inline void
1406 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1407 		    int expected_num)
1408 {
1409   if (mismatch_detail == NULL)
1410     return;
1411   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1412   mismatch_detail->data[0] = expected_num;
1413 }
1414 
1415 static inline void
1416 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1417 		 const char* error)
1418 {
1419   if (mismatch_detail == NULL)
1420     return;
1421   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1422 }
1423 
1424 /* General constraint checking based on operand code.
1425 
1426    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1427    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1428 
1429    This function has to be called after the qualifiers for all operands
1430    have been resolved.
1431 
1432    A mismatching error message is returned in *MISMATCH_DETAIL upon request,
1433    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
1434    messages during disassembly, where they are not wanted.  We avoid the
1435    dynamic construction of strings of error messages
1436    here (i.e. in libopcodes), as it is costly and complicated; instead, we
1437    use a combination of error code, static string and some integer data to
1438    represent an error.  */
1439 
1440 static int
1441 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1442 				  enum aarch64_opnd type,
1443 				  const aarch64_opcode *opcode,
1444 				  aarch64_operand_error *mismatch_detail)
1445 {
1446   unsigned num, modifiers, shift;
1447   unsigned char size;
1448   int64_t imm, min_value, max_value;
1449   uint64_t uvalue, mask;
1450   const aarch64_opnd_info *opnd = opnds + idx;
1451   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1452 
1453   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1454 
1455   switch (aarch64_operands[type].op_class)
1456     {
1457     case AARCH64_OPND_CLASS_INT_REG:
1458       /* Check pair reg constraints for cas* instructions.  */
1459       if (type == AARCH64_OPND_PAIRREG)
1460 	{
1461 	  assert (idx == 1 || idx == 3);
1462 	  if (opnds[idx - 1].reg.regno % 2 != 0)
1463 	    {
1464 	      set_syntax_error (mismatch_detail, idx - 1,
1465 				_("reg pair must start from even reg"));
1466 	      return 0;
1467 	    }
1468 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1469 	    {
1470 	      set_syntax_error (mismatch_detail, idx,
1471 				_("reg pair must be contiguous"));
1472 	      return 0;
1473 	    }
1474 	  break;
1475 	}
1476 
1477       /* <Xt> may be optional in some IC and TLBI instructions.  */
1478       if (type == AARCH64_OPND_Rt_SYS)
1479 	{
1480 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1481 			       == AARCH64_OPND_CLASS_SYSTEM));
1482 	  if (opnds[1].present
1483 	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1484 	    {
1485 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1486 	      return 0;
1487 	    }
1488 	  if (!opnds[1].present
1489 	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1490 	    {
1491 	      set_other_error (mismatch_detail, idx, _("missing register"));
1492 	      return 0;
1493 	    }
1494 	}
1495       switch (qualifier)
1496 	{
1497 	case AARCH64_OPND_QLF_WSP:
1498 	case AARCH64_OPND_QLF_SP:
1499 	  if (!aarch64_stack_pointer_p (opnd))
1500 	    {
1501 	      set_other_error (mismatch_detail, idx,
1502 			       _("stack pointer register expected"));
1503 	      return 0;
1504 	    }
1505 	  break;
1506 	default:
1507 	  break;
1508 	}
1509       break;
1510 
1511     case AARCH64_OPND_CLASS_SVE_REG:
1512       switch (type)
1513 	{
1514 	case AARCH64_OPND_SVE_Zm3_INDEX:
1515 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
1516 	case AARCH64_OPND_SVE_Zm4_INDEX:
1517 	  size = get_operand_fields_width (get_operand_from_code (type));
1518 	  shift = get_operand_specific_data (&aarch64_operands[type]);
1519 	  mask = (1 << shift) - 1;
1520 	  if (opnd->reg.regno > mask)
1521 	    {
1522 	      assert (mask == 7 || mask == 15);
1523 	      set_other_error (mismatch_detail, idx,
1524 			       mask == 15
1525 			       ? _("z0-z15 expected")
1526 			       : _("z0-z7 expected"));
1527 	      return 0;
1528 	    }
1529 	  mask = (1 << (size - shift)) - 1;
1530 	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
1531 	    {
1532 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1533 	      return 0;
1534 	    }
1535 	  break;
1536 
1537 	case AARCH64_OPND_SVE_Zn_INDEX:
1538 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1539 	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1540 	    {
1541 	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
1542 					       0, 64 / size - 1);
1543 	      return 0;
1544 	    }
1545 	  break;
1546 
1547 	case AARCH64_OPND_SVE_ZnxN:
1548 	case AARCH64_OPND_SVE_ZtxN:
1549 	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1550 	    {
1551 	      set_other_error (mismatch_detail, idx,
1552 			       _("invalid register list"));
1553 	      return 0;
1554 	    }
1555 	  break;
1556 
1557 	default:
1558 	  break;
1559 	}
1560       break;
1561 
1562     case AARCH64_OPND_CLASS_PRED_REG:
1563       if (opnd->reg.regno >= 8
1564 	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
1565 	{
1566 	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1567 	  return 0;
1568 	}
1569       break;
1570 
1571     case AARCH64_OPND_CLASS_COND:
1572       if (type == AARCH64_OPND_COND1
1573 	  && (opnds[idx].cond->value & 0xe) == 0xe)
1574 	{
1575 	  /* Don't allow AL or NV.  */
1576 	  set_syntax_error (mismatch_detail, idx, NULL);
1577 	}
1578       break;
1579 
1580     case AARCH64_OPND_CLASS_ADDRESS:
1581       /* Check writeback.  */
1582       switch (opcode->iclass)
1583 	{
1584 	case ldst_pos:
1585 	case ldst_unscaled:
1586 	case ldstnapair_offs:
1587 	case ldstpair_off:
1588 	case ldst_unpriv:
1589 	  if (opnd->addr.writeback == 1)
1590 	    {
1591 	      set_syntax_error (mismatch_detail, idx,
1592 				_("unexpected address writeback"));
1593 	      return 0;
1594 	    }
1595 	  break;
1596 	case ldst_imm10:
1597 	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1598 	    {
1599 	      set_syntax_error (mismatch_detail, idx,
1600 				_("unexpected address writeback"));
1601 	      return 0;
1602 	    }
1603 	  break;
1604 	case ldst_imm9:
1605 	case ldstpair_indexed:
1606 	case asisdlsep:
1607 	case asisdlsop:
1608 	  if (opnd->addr.writeback == 0)
1609 	    {
1610 	      set_syntax_error (mismatch_detail, idx,
1611 				_("address writeback expected"));
1612 	      return 0;
1613 	    }
1614 	  break;
1615 	default:
1616 	  assert (opnd->addr.writeback == 0);
1617 	  break;
1618 	}
1619       switch (type)
1620 	{
1621 	case AARCH64_OPND_ADDR_SIMM7:
1622 	  /* Scaled signed 7-bit immediate offset.  */
1623 	  /* Get the size of the data element that is accessed, which may
1624 	     differ from the source register size,
1625 	     e.g. in strb/ldrb.  */
1626 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1627 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1628 	    {
1629 	      set_offset_out_of_range_error (mismatch_detail, idx,
1630 					     -64 * size, 63 * size);
1631 	      return 0;
1632 	    }
1633 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1634 	    {
1635 	      set_unaligned_error (mismatch_detail, idx, size);
1636 	      return 0;
1637 	    }
1638 	  break;
1639 	case AARCH64_OPND_ADDR_OFFSET:
1640 	case AARCH64_OPND_ADDR_SIMM9:
1641 	  /* Unscaled signed 9-bit immediate offset.  */
1642 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1643 	    {
1644 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1645 	      return 0;
1646 	    }
1647 	  break;
1648 
1649 	case AARCH64_OPND_ADDR_SIMM9_2:
1650 	  /* Unscaled signed 9-bit immediate offset, which has to be negative
1651 	     or unaligned.  */
1652 	  size = aarch64_get_qualifier_esize (qualifier);
1653 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1654 	       && !value_aligned_p (opnd->addr.offset.imm, size))
1655 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1656 	    return 1;
1657 	  set_other_error (mismatch_detail, idx,
1658 			   _("negative or unaligned offset expected"));
1659 	  return 0;
1660 
1661 	case AARCH64_OPND_ADDR_SIMM10:
1662 	  /* Scaled signed 10-bit immediate offset.  */
1663 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1664 	    {
1665 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1666 	      return 0;
1667 	    }
1668 	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
1669 	    {
1670 	      set_unaligned_error (mismatch_detail, idx, 8);
1671 	      return 0;
1672 	    }
1673 	  break;
1674 
1675 	case AARCH64_OPND_ADDR_SIMM11:
1676 	  /* Signed 11-bit immediate offset (multiple of 16).  */
1677 	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1678 	    {
1679 	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1680 	      return 0;
1681 	    }
1682 
1683 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1684 	    {
1685 	      set_unaligned_error (mismatch_detail, idx, 16);
1686 	      return 0;
1687 	    }
1688 	  break;
1689 
1690 	case AARCH64_OPND_ADDR_SIMM13:
1691 	  /* Signed 13-bit immediate offset (multiple of 16).  */
1692 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1693 	    {
1694 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1695 	      return 0;
1696 	    }
1697 
1698 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1699 	    {
1700 	      set_unaligned_error (mismatch_detail, idx, 16);
1701 	      return 0;
1702 	    }
1703 	  break;
1704 
1705 	case AARCH64_OPND_SIMD_ADDR_POST:
1706 	  /* AdvSIMD load/store multiple structures, post-index.  */
1707 	  assert (idx == 1);
1708 	  if (opnd->addr.offset.is_reg)
1709 	    {
1710 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1711 		return 1;
1712 	      else
1713 		{
1714 		  set_other_error (mismatch_detail, idx,
1715 				   _("invalid register offset"));
1716 		  return 0;
1717 		}
1718 	    }
1719 	  else
1720 	    {
1721 	      const aarch64_opnd_info *prev = &opnds[idx-1];
1722 	      unsigned num_bytes; /* total number of bytes transferred.  */
1723 	      /* The opcode dependent area stores the number of elements in
1724 		 each structure to be loaded/stored.  */
1725 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1726 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1727 		/* Special handling of loading a single structure to all lanes.  */
1728 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1729 		  * aarch64_get_qualifier_esize (prev->qualifier);
1730 	      else
1731 		num_bytes = prev->reglist.num_regs
1732 		  * aarch64_get_qualifier_esize (prev->qualifier)
1733 		  * aarch64_get_qualifier_nelem (prev->qualifier);
1734 	      if ((int) num_bytes != opnd->addr.offset.imm)
1735 		{
1736 		  set_other_error (mismatch_detail, idx,
1737 				   _("invalid post-increment amount"));
1738 		  return 0;
1739 		}
1740 	    }
1741 	  break;
1742 
1743 	case AARCH64_OPND_ADDR_REGOFF:
1744 	  /* Get the size of the data element that is accessed, which may be
1745 	     different from that of the source register size,
1746 	     e.g. in strb/ldrb.  */
1747 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1748 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1749 	  if (opnd->shifter.amount != 0
1750 	      && opnd->shifter.amount != (int)get_logsz (size))
1751 	    {
1752 	      set_other_error (mismatch_detail, idx,
1753 			       _("invalid shift amount"));
1754 	      return 0;
1755 	    }
1756 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1757 	     operators.  */
1758 	  switch (opnd->shifter.kind)
1759 	    {
1760 	    case AARCH64_MOD_UXTW:
1761 	    case AARCH64_MOD_LSL:
1762 	    case AARCH64_MOD_SXTW:
1763 	    case AARCH64_MOD_SXTX: break;
1764 	    default:
1765 	      set_other_error (mismatch_detail, idx,
1766 			       _("invalid extend/shift operator"));
1767 	      return 0;
1768 	    }
1769 	  break;
1770 
1771 	case AARCH64_OPND_ADDR_UIMM12:
1772 	  imm = opnd->addr.offset.imm;
1773 	  /* Get the size of the data element that is accessed, which may be
1774 	     different from that of the source register size,
1775 	     e.g. in strb/ldrb.  */
1776 	  size = aarch64_get_qualifier_esize (qualifier);
1777 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1778 	    {
1779 	      set_offset_out_of_range_error (mismatch_detail, idx,
1780 					     0, 4095 * size);
1781 	      return 0;
1782 	    }
1783 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1784 	    {
1785 	      set_unaligned_error (mismatch_detail, idx, size);
1786 	      return 0;
1787 	    }
1788 	  break;
1789 
1790 	case AARCH64_OPND_ADDR_PCREL14:
1791 	case AARCH64_OPND_ADDR_PCREL19:
1792 	case AARCH64_OPND_ADDR_PCREL21:
1793 	case AARCH64_OPND_ADDR_PCREL26:
1794 	  imm = opnd->imm.value;
1795 	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1796 	    {
1797 	      /* The offset value in a PC-relative branch instruction is always
1798 		 4-byte aligned and is encoded without the lowest 2 bits.  */
1799 	      if (!value_aligned_p (imm, 4))
1800 		{
1801 		  set_unaligned_error (mismatch_detail, idx, 4);
1802 		  return 0;
1803 		}
1804 	      /* Right shift by 2 so that we can carry out the following check
1805 		 canonically.  */
1806 	      imm >>= 2;
1807 	    }
1808 	  size = get_operand_fields_width (get_operand_from_code (type));
1809 	  if (!value_fit_signed_field_p (imm, size))
1810 	    {
1811 	      set_other_error (mismatch_detail, idx,
1812 			       _("immediate out of range"));
1813 	      return 0;
1814 	    }
1815 	  break;
1816 
1817 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1818 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1819 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1820 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1821 	  min_value = -8;
1822 	  max_value = 7;
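	  /* Common checks for the MUL VL addressing forms: scale the signed
	     immediate range by the operand-specific factor, require any
	     explicit operator to be MUL VL (and require one whenever the
	     offset is non-zero), and require the offset to be a multiple of
	     that factor.  */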
1823 	sve_imm_offset_vl:
1824 	  assert (!opnd->addr.offset.is_reg);
1825 	  assert (opnd->addr.preind);
1826 	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1827 	  min_value *= num;
1828 	  max_value *= num;
1829 	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1830 	      || (opnd->shifter.operator_present
1831 		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1832 	    {
1833 	      set_other_error (mismatch_detail, idx,
1834 			       _("invalid addressing mode"));
1835 	      return 0;
1836 	    }
1837 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1838 	    {
1839 	      set_offset_out_of_range_error (mismatch_detail, idx,
1840 					     min_value, max_value);
1841 	      return 0;
1842 	    }
1843 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1844 	    {
1845 	      set_unaligned_error (mismatch_detail, idx, num);
1846 	      return 0;
1847 	    }
1848 	  break;
1849 
1850 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1851 	  min_value = -32;
1852 	  max_value = 31;
1853 	  goto sve_imm_offset_vl;
1854 
1855 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1856 	  min_value = -256;
1857 	  max_value = 255;
1858 	  goto sve_imm_offset_vl;
1859 
1860 	case AARCH64_OPND_SVE_ADDR_RI_U6:
1861 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1862 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1863 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1864 	  min_value = 0;
1865 	  max_value = 63;
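	  /* Common checks for the scaled immediate offsets: scale the range
	     by the operand-specific access size, reject any shift or extend
	     operator, and require the offset to be a multiple of that size.  */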
1866 	sve_imm_offset:
1867 	  assert (!opnd->addr.offset.is_reg);
1868 	  assert (opnd->addr.preind);
1869 	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1870 	  min_value *= num;
1871 	  max_value *= num;
1872 	  if (opnd->shifter.operator_present
1873 	      || opnd->shifter.amount_present)
1874 	    {
1875 	      set_other_error (mismatch_detail, idx,
1876 			       _("invalid addressing mode"));
1877 	      return 0;
1878 	    }
1879 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1880 	    {
1881 	      set_offset_out_of_range_error (mismatch_detail, idx,
1882 					     min_value, max_value);
1883 	      return 0;
1884 	    }
1885 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1886 	    {
1887 	      set_unaligned_error (mismatch_detail, idx, num);
1888 	      return 0;
1889 	    }
1890 	  break;
1891 
1892 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1893 	  min_value = -8;
1894 	  max_value = 7;
1895 	  goto sve_imm_offset;
1896 
1897 	case AARCH64_OPND_SVE_ADDR_R:
1898 	case AARCH64_OPND_SVE_ADDR_RR:
1899 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1900 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1901 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1902 	case AARCH64_OPND_SVE_ADDR_RX:
1903 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1904 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1905 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1906 	case AARCH64_OPND_SVE_ADDR_RZ:
1907 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1908 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1909 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1910 	  modifiers = 1 << AARCH64_MOD_LSL;
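	  /* Common checks for the register-plus-register forms: the offset
	     must be a register (XZR is rejected for operands carrying the
	     OPD_F_NO_ZR flag), and the shifter kind and amount must match
	     what the operand type expects.  */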
1911 	sve_rr_operand:
1912 	  assert (opnd->addr.offset.is_reg);
1913 	  assert (opnd->addr.preind);
1914 	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1915 	      && opnd->addr.offset.regno == 31)
1916 	    {
1917 	      set_other_error (mismatch_detail, idx,
1918 			       _("index register xzr is not allowed"));
1919 	      return 0;
1920 	    }
1921 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1922 	      || (opnd->shifter.amount
1923 		  != get_operand_specific_data (&aarch64_operands[type])))
1924 	    {
1925 	      set_other_error (mismatch_detail, idx,
1926 			       _("invalid addressing mode"));
1927 	      return 0;
1928 	    }
1929 	  break;
1930 
1931 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1932 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1933 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1934 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1935 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1936 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1937 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1938 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1939 	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1940 	  goto sve_rr_operand;
1941 
1942 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
1943 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1944 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1945 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1946 	  min_value = 0;
1947 	  max_value = 31;
1948 	  goto sve_imm_offset;
1949 
1950 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1951 	  modifiers = 1 << AARCH64_MOD_LSL;
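	  /* Common checks for the vector-plus-vector forms: the shifter must
	     be one of the allowed modifiers and its amount must be in the
	     range 0 to 3.  */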
1952 	sve_zz_operand:
1953 	  assert (opnd->addr.offset.is_reg);
1954 	  assert (opnd->addr.preind);
1955 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1956 	      || opnd->shifter.amount < 0
1957 	      || opnd->shifter.amount > 3)
1958 	    {
1959 	      set_other_error (mismatch_detail, idx,
1960 			       _("invalid addressing mode"));
1961 	      return 0;
1962 	    }
1963 	  break;
1964 
1965 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1966 	  modifiers = (1 << AARCH64_MOD_SXTW);
1967 	  goto sve_zz_operand;
1968 
1969 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1970 	  modifiers = 1 << AARCH64_MOD_UXTW;
1971 	  goto sve_zz_operand;
1972 
1973 	default:
1974 	  break;
1975 	}
1976       break;
1977 
1978     case AARCH64_OPND_CLASS_SIMD_REGLIST:
1979       if (type == AARCH64_OPND_LEt)
1980 	{
1981 	  /* Get the upper bound for the element index.  */
1982 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1983 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
1984 	    {
1985 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1986 	      return 0;
1987 	    }
1988 	}
1989       /* The opcode dependent area stores the number of elements in
1990 	 each structure to be loaded/stored.  */
1991       num = get_opcode_dependent_value (opcode);
1992       switch (type)
1993 	{
1994 	case AARCH64_OPND_LVt:
1995 	  assert (num >= 1 && num <= 4);
1996 	  /* Unless LD1/ST1, the number of registers should be equal to that
1997 	     of the structure elements.  */
1998 	  if (num != 1 && opnd->reglist.num_regs != num)
1999 	    {
2000 	      set_reg_list_error (mismatch_detail, idx, num);
2001 	      return 0;
2002 	    }
2003 	  break;
2004 	case AARCH64_OPND_LVt_AL:
2005 	case AARCH64_OPND_LEt:
2006 	  assert (num >= 1 && num <= 4);
2007 	  /* The number of registers should be equal to that of the structure
2008 	     elements.  */
2009 	  if (opnd->reglist.num_regs != num)
2010 	    {
2011 	      set_reg_list_error (mismatch_detail, idx, num);
2012 	      return 0;
2013 	    }
2014 	  break;
2015 	default:
2016 	  break;
2017 	}
2018       break;
2019 
2020     case AARCH64_OPND_CLASS_IMMEDIATE:
2021       /* Constraint check on immediate operand.  */
2022       imm = opnd->imm.value;
2023       /* E.g. imm_0_31 constrains value to be 0..31.  */
2024       if (qualifier_value_in_range_constraint_p (qualifier)
2025 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
2026 				get_upper_bound (qualifier)))
2027 	{
2028 	  set_imm_out_of_range_error (mismatch_detail, idx,
2029 				      get_lower_bound (qualifier),
2030 				      get_upper_bound (qualifier));
2031 	  return 0;
2032 	}
2033 
2034       switch (type)
2035 	{
2036 	case AARCH64_OPND_AIMM:
2037 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2038 	    {
2039 	      set_other_error (mismatch_detail, idx,
2040 			       _("invalid shift operator"));
2041 	      return 0;
2042 	    }
2043 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2044 	    {
2045 	      set_other_error (mismatch_detail, idx,
2046 			       _("shift amount must be 0 or 12"));
2047 	      return 0;
2048 	    }
2049 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2050 	    {
2051 	      set_other_error (mismatch_detail, idx,
2052 			       _("immediate out of range"));
2053 	      return 0;
2054 	    }
2055 	  break;
2056 
2057 	case AARCH64_OPND_HALF:
2058 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2059 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2060 	    {
2061 	      set_other_error (mismatch_detail, idx,
2062 			       _("invalid shift operator"));
2063 	      return 0;
2064 	    }
2065 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2066 	  if (!value_aligned_p (opnd->shifter.amount, 16))
2067 	    {
2068 	      set_other_error (mismatch_detail, idx,
2069 			       _("shift amount must be a multiple of 16"));
2070 	      return 0;
2071 	    }
2072 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2073 	    {
2074 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
2075 						 0, size * 8 - 16);
2076 	      return 0;
2077 	    }
2078 	  if (opnd->imm.value < 0)
2079 	    {
2080 	      set_other_error (mismatch_detail, idx,
2081 			       _("negative immediate value not allowed"));
2082 	      return 0;
2083 	    }
2084 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2085 	    {
2086 	      set_other_error (mismatch_detail, idx,
2087 			       _("immediate out of range"));
2088 	      return 0;
2089 	    }
2090 	  break;
2091 
2092 	case AARCH64_OPND_IMM_MOV:
2093 	    {
2094 	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2095 	      imm = opnd->imm.value;
2096 	      assert (idx == 1);
2097 	      switch (opcode->op)
2098 		{
2099 		case OP_MOV_IMM_WIDEN:
2100 		  imm = ~imm;
2101 		  /* Fall through.  */
2102 		case OP_MOV_IMM_WIDE:
2103 		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2104 		    {
2105 		      set_other_error (mismatch_detail, idx,
2106 				       _("immediate out of range"));
2107 		      return 0;
2108 		    }
2109 		  break;
2110 		case OP_MOV_IMM_LOG:
2111 		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
2112 		    {
2113 		      set_other_error (mismatch_detail, idx,
2114 				       _("immediate out of range"));
2115 		      return 0;
2116 		    }
2117 		  break;
2118 		default:
2119 		  assert (0);
2120 		  return 0;
2121 		}
2122 	    }
2123 	  break;
2124 
2125 	case AARCH64_OPND_NZCV:
2126 	case AARCH64_OPND_CCMP_IMM:
2127 	case AARCH64_OPND_EXCEPTION:
2128 	case AARCH64_OPND_UIMM4:
2129 	case AARCH64_OPND_UIMM4_ADDG:
2130 	case AARCH64_OPND_UIMM7:
2131 	case AARCH64_OPND_UIMM3_OP1:
2132 	case AARCH64_OPND_UIMM3_OP2:
2133 	case AARCH64_OPND_SVE_UIMM3:
2134 	case AARCH64_OPND_SVE_UIMM7:
2135 	case AARCH64_OPND_SVE_UIMM8:
2136 	case AARCH64_OPND_SVE_UIMM8_53:
2137 	  size = get_operand_fields_width (get_operand_from_code (type));
2138 	  assert (size < 32);
2139 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2140 	    {
2141 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2142 					  (1 << size) - 1);
2143 	      return 0;
2144 	    }
2145 	  break;
2146 
2147 	case AARCH64_OPND_UIMM10:
2148 	  /* Scaled unsigned 10-bit immediate offset.  */
2149 	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
2150 	    {
2151 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2152 	      return 0;
2153 	    }
2154 
2155 	  if (!value_aligned_p (opnd->imm.value, 16))
2156 	    {
2157 	      set_unaligned_error (mismatch_detail, idx, 16);
2158 	      return 0;
2159 	    }
2160 	  break;
2161 
2162 	case AARCH64_OPND_SIMM5:
2163 	case AARCH64_OPND_SVE_SIMM5:
2164 	case AARCH64_OPND_SVE_SIMM5B:
2165 	case AARCH64_OPND_SVE_SIMM6:
2166 	case AARCH64_OPND_SVE_SIMM8:
2167 	  size = get_operand_fields_width (get_operand_from_code (type));
2168 	  assert (size < 32);
2169 	  if (!value_fit_signed_field_p (opnd->imm.value, size))
2170 	    {
2171 	      set_imm_out_of_range_error (mismatch_detail, idx,
2172 					  -(1 << (size - 1)),
2173 					  (1 << (size - 1)) - 1);
2174 	      return 0;
2175 	    }
2176 	  break;
2177 
2178 	case AARCH64_OPND_WIDTH:
2179 	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2180 		  && opnds[0].type == AARCH64_OPND_Rd);
2181 	  size = get_upper_bound (qualifier);
2182 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
2183 	    /* lsb+width <= reg.size  */
2184 	    {
2185 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
2186 					  size - opnds[idx-1].imm.value);
2187 	      return 0;
2188 	    }
2189 	  break;
2190 
2191 	case AARCH64_OPND_LIMM:
2192 	case AARCH64_OPND_SVE_LIMM:
2193 	  {
2194 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2195 	    uint64_t uimm = opnd->imm.value;
2196 	    if (opcode->op == OP_BIC)
2197 	      uimm = ~uimm;
2198 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2199 	      {
2200 		set_other_error (mismatch_detail, idx,
2201 				 _("immediate out of range"));
2202 		return 0;
2203 	      }
2204 	  }
2205 	  break;
2206 
2207 	case AARCH64_OPND_IMM0:
2208 	case AARCH64_OPND_FPIMM0:
2209 	  if (opnd->imm.value != 0)
2210 	    {
2211 	      set_other_error (mismatch_detail, idx,
2212 			       _("immediate zero expected"));
2213 	      return 0;
2214 	    }
2215 	  break;
2216 
2217 	case AARCH64_OPND_IMM_ROT1:
2218 	case AARCH64_OPND_IMM_ROT2:
2219 	case AARCH64_OPND_SVE_IMM_ROT2:
2220 	  if (opnd->imm.value != 0
2221 	      && opnd->imm.value != 90
2222 	      && opnd->imm.value != 180
2223 	      && opnd->imm.value != 270)
2224 	    {
2225 	      set_other_error (mismatch_detail, idx,
2226 			       _("rotate expected to be 0, 90, 180 or 270"));
2227 	      return 0;
2228 	    }
2229 	  break;
2230 
2231 	case AARCH64_OPND_IMM_ROT3:
2232 	case AARCH64_OPND_SVE_IMM_ROT1:
2233 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
2234 	    {
2235 	      set_other_error (mismatch_detail, idx,
2236 			       _("rotate expected to be 90 or 270"));
2237 	      return 0;
2238 	    }
2239 	  break;
2240 
2241 	case AARCH64_OPND_SHLL_IMM:
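	  /* The shift amount of SHLL must equal the element size, in bits,
	     of the source vector.  */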
2242 	  assert (idx == 2);
2243 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2244 	  if (opnd->imm.value != size)
2245 	    {
2246 	      set_other_error (mismatch_detail, idx,
2247 			       _("invalid shift amount"));
2248 	      return 0;
2249 	    }
2250 	  break;
2251 
2252 	case AARCH64_OPND_IMM_VLSL:
2253 	  size = aarch64_get_qualifier_esize (qualifier);
2254 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2255 	    {
2256 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2257 					  size * 8 - 1);
2258 	      return 0;
2259 	    }
2260 	  break;
2261 
2262 	case AARCH64_OPND_IMM_VLSR:
2263 	  size = aarch64_get_qualifier_esize (qualifier);
2264 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2265 	    {
2266 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2267 	      return 0;
2268 	    }
2269 	  break;
2270 
2271 	case AARCH64_OPND_SIMD_IMM:
2272 	case AARCH64_OPND_SIMD_IMM_SFT:
2273 	  /* Qualifier check.  */
2274 	  switch (qualifier)
2275 	    {
2276 	    case AARCH64_OPND_QLF_LSL:
2277 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
2278 		{
2279 		  set_other_error (mismatch_detail, idx,
2280 				   _("invalid shift operator"));
2281 		  return 0;
2282 		}
2283 	      break;
2284 	    case AARCH64_OPND_QLF_MSL:
2285 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
2286 		{
2287 		  set_other_error (mismatch_detail, idx,
2288 				   _("invalid shift operator"));
2289 		  return 0;
2290 		}
2291 	      break;
2292 	    case AARCH64_OPND_QLF_NIL:
2293 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2294 		{
2295 		  set_other_error (mismatch_detail, idx,
2296 				   _("shift is not permitted"));
2297 		  return 0;
2298 		}
2299 	      break;
2300 	    default:
2301 	      assert (0);
2302 	      return 0;
2303 	    }
2304 	  /* Is the immediate valid?  */
2305 	  assert (idx == 1);
2306 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2307 	    {
2308 	      /* uimm8 or simm8 */
2309 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
2310 		{
2311 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2312 		  return 0;
2313 		}
2314 	    }
2315 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2316 	    {
2317 	      /* uimm64 is not
2318 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2319 		 ffffffffgggggggghhhhhhhh'.  */
2320 	      set_other_error (mismatch_detail, idx,
2321 			       _("invalid value for immediate"));
2322 	      return 0;
2323 	    }
2324 	  /* Is the shift amount valid?  */
2325 	  switch (opnd->shifter.kind)
2326 	    {
2327 	    case AARCH64_MOD_LSL:
2328 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2329 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2330 		{
2331 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2332 						     (size - 1) * 8);
2333 		  return 0;
2334 		}
2335 	      if (!value_aligned_p (opnd->shifter.amount, 8))
2336 		{
2337 		  set_unaligned_error (mismatch_detail, idx, 8);
2338 		  return 0;
2339 		}
2340 	      break;
2341 	    case AARCH64_MOD_MSL:
2342 	      /* Only 8 and 16 are valid shift amounts.  */
2343 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2344 		{
2345 		  set_other_error (mismatch_detail, idx,
2346 				   _("shift amount must be 8 or 16"));
2347 		  return 0;
2348 		}
2349 	      break;
2350 	    default:
2351 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2352 		{
2353 		  set_other_error (mismatch_detail, idx,
2354 				   _("invalid shift operator"));
2355 		  return 0;
2356 		}
2357 	      break;
2358 	    }
2359 	  break;
2360 
2361 	case AARCH64_OPND_FPIMM:
2362 	case AARCH64_OPND_SIMD_FPIMM:
2363 	case AARCH64_OPND_SVE_FPIMM8:
2364 	  if (opnd->imm.is_fp == 0)
2365 	    {
2366 	      set_other_error (mismatch_detail, idx,
2367 			       _("floating-point immediate expected"));
2368 	      return 0;
2369 	    }
2370 	  /* The value is expected to be an 8-bit floating-point constant with
2371 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
2372 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2373 	     instruction).  */
2374 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
2375 	    {
2376 	      set_other_error (mismatch_detail, idx,
2377 			       _("immediate out of range"));
2378 	      return 0;
2379 	    }
2380 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
2381 	    {
2382 	      set_other_error (mismatch_detail, idx,
2383 			       _("invalid shift operator"));
2384 	      return 0;
2385 	    }
2386 	  break;
2387 
2388 	case AARCH64_OPND_SVE_AIMM:
2389 	  min_value = 0;
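	  /* Common checks for the SVE arithmetic immediates: only LSL #0 is
	     allowed for byte elements, LSL #0 or #8 for wider elements; the
	     immediate must fit the element size and, after subtracting
	     MIN_VALUE, fit in 8 bits.  */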
2390 	sve_aimm:
2391 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2392 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2393 	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2394 	  uvalue = opnd->imm.value;
2395 	  shift = opnd->shifter.amount;
2396 	  if (size == 1)
2397 	    {
2398 	      if (shift != 0)
2399 		{
2400 		  set_other_error (mismatch_detail, idx,
2401 				   _("no shift amount allowed for"
2402 				     " 8-bit constants"));
2403 		  return 0;
2404 		}
2405 	    }
2406 	  else
2407 	    {
2408 	      if (shift != 0 && shift != 8)
2409 		{
2410 		  set_other_error (mismatch_detail, idx,
2411 				   _("shift amount must be 0 or 8"));
2412 		  return 0;
2413 		}
2414 	      if (shift == 0 && (uvalue & 0xff) == 0)
2415 		{
2416 		  shift = 8;
2417 		  uvalue = (int64_t) uvalue / 256;
2418 		}
2419 	    }
2420 	  mask >>= shift;
2421 	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2422 	    {
2423 	      set_other_error (mismatch_detail, idx,
2424 			       _("immediate too big for element size"));
2425 	      return 0;
2426 	    }
2427 	  uvalue = (uvalue - min_value) & mask;
2428 	  if (uvalue > 0xff)
2429 	    {
2430 	      set_other_error (mismatch_detail, idx,
2431 			       _("invalid arithmetic immediate"));
2432 	      return 0;
2433 	    }
2434 	  break;
2435 
2436 	case AARCH64_OPND_SVE_ASIMM:
2437 	  min_value = -128;
2438 	  goto sve_aimm;
2439 
2440 	case AARCH64_OPND_SVE_I1_HALF_ONE:
2441 	  assert (opnd->imm.is_fp);
2442 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2443 	    {
2444 	      set_other_error (mismatch_detail, idx,
2445 			       _("floating-point value must be 0.5 or 1.0"));
2446 	      return 0;
2447 	    }
2448 	  break;
2449 
2450 	case AARCH64_OPND_SVE_I1_HALF_TWO:
2451 	  assert (opnd->imm.is_fp);
2452 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2453 	    {
2454 	      set_other_error (mismatch_detail, idx,
2455 			       _("floating-point value must be 0.5 or 2.0"));
2456 	      return 0;
2457 	    }
2458 	  break;
2459 
2460 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
2461 	  assert (opnd->imm.is_fp);
2462 	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2463 	    {
2464 	      set_other_error (mismatch_detail, idx,
2465 			       _("floating-point value must be 0.0 or 1.0"));
2466 	      return 0;
2467 	    }
2468 	  break;
2469 
2470 	case AARCH64_OPND_SVE_INV_LIMM:
2471 	  {
2472 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2473 	    uint64_t uimm = ~opnd->imm.value;
2474 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2475 	      {
2476 		set_other_error (mismatch_detail, idx,
2477 				 _("immediate out of range"));
2478 		return 0;
2479 	      }
2480 	  }
2481 	  break;
2482 
2483 	case AARCH64_OPND_SVE_LIMM_MOV:
2484 	  {
2485 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2486 	    uint64_t uimm = opnd->imm.value;
2487 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2488 	      {
2489 		set_other_error (mismatch_detail, idx,
2490 				 _("immediate out of range"));
2491 		return 0;
2492 	      }
2493 	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2494 	      {
2495 		set_other_error (mismatch_detail, idx,
2496 				 _("invalid replicated MOV immediate"));
2497 		return 0;
2498 	      }
2499 	  }
2500 	  break;
2501 
2502 	case AARCH64_OPND_SVE_PATTERN_SCALED:
2503 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2504 	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2505 	    {
2506 	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2507 	      return 0;
2508 	    }
2509 	  break;
2510 
2511 	case AARCH64_OPND_SVE_SHLIMM_PRED:
2512 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2513 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2514 	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2515 	    {
2516 	      set_imm_out_of_range_error (mismatch_detail, idx,
2517 					  0, 8 * size - 1);
2518 	      return 0;
2519 	    }
2520 	  break;
2521 
2522 	case AARCH64_OPND_SVE_SHRIMM_PRED:
2523 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2524 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2525 	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2526 	    {
2527 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2528 	      return 0;
2529 	    }
2530 	  break;
2531 
2532 	default:
2533 	  break;
2534 	}
2535       break;
2536 
2537     case AARCH64_OPND_CLASS_SYSTEM:
2538       switch (type)
2539 	{
2540 	case AARCH64_OPND_PSTATEFIELD:
2541 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2542 	  /* MSR UAO, #uimm4
2543 	     MSR PAN, #uimm4
2544 	     MSR SSBS,#uimm4
2545 	     The immediate must be #0 or #1.  */
2546 	  if ((opnd->pstatefield == 0x03	/* UAO.  */
2547 	       || opnd->pstatefield == 0x04	/* PAN.  */
2548 	       || opnd->pstatefield == 0x19     /* SSBS.  */
2549 	       || opnd->pstatefield == 0x1a)	/* DIT.  */
2550 	      && opnds[1].imm.value > 1)
2551 	    {
2552 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2553 	      return 0;
2554 	    }
2555 	  /* MSR SPSel, #uimm4
2556 	     Uses uimm4 as a control value to select the stack pointer: if
2557 	     bit 0 is set it selects the current exception level's stack
2558 	     pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2559 	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
2560 	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2561 	    {
2562 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2563 	      return 0;
2564 	    }
2565 	  break;
2566 	default:
2567 	  break;
2568 	}
2569       break;
2570 
2571     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2572       /* Get the upper bound for the element index.  */
2573       if (opcode->op == OP_FCMLA_ELEM)
2574 	/* FCMLA index range depends on the vector size of other operands
2575 	   and is halved because complex numbers take two elements.  */
2576 	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2577 	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2578       else
2579 	num = 16;
2580       num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2581       assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2582 
2583       /* Index out-of-range.  */
2584       if (!value_in_range_p (opnd->reglane.index, 0, num))
2585 	{
2586 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2587 	  return 0;
2588 	}
2589       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2590 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
2591 	 number is encoded in "size:M:Rm":
2592 	 size	<Vm>
2593 	 00		RESERVED
2594 	 01		0:Rm
2595 	 10		M:Rm
2596 	 11		RESERVED  */
2597       if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2598 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
2599 	{
2600 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2601 	  return 0;
2602 	}
2603       break;
2604 
2605     case AARCH64_OPND_CLASS_MODIFIED_REG:
2606       assert (idx == 1 || idx == 2);
2607       switch (type)
2608 	{
2609 	case AARCH64_OPND_Rm_EXT:
2610 	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
2611 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
2612 	    {
2613 	      set_other_error (mismatch_detail, idx,
2614 			       _("extend operator expected"));
2615 	      return 0;
2616 	    }
2617 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2618 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
2619 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
2620 	     case.  */
2621 	  if (!aarch64_stack_pointer_p (opnds + 0)
2622 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2623 	    {
2624 	      if (!opnd->shifter.operator_present)
2625 		{
2626 		  set_other_error (mismatch_detail, idx,
2627 				   _("missing extend operator"));
2628 		  return 0;
2629 		}
2630 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2631 		{
2632 		  set_other_error (mismatch_detail, idx,
2633 				   _("'LSL' operator not allowed"));
2634 		  return 0;
2635 		}
2636 	    }
2637 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
2638 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
2639 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2640 	    {
2641 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2642 	      return 0;
2643 	    }
2644 	  /* In the 64-bit form, the final register operand is written as Wm
2645 	     for all but the (possibly omitted) UXTX/LSL and SXTX
2646 	     operators.
2647 	     N.B. GAS allows X register to be used with any operator as a
2648 	     programming convenience.  */
2649 	  if (qualifier == AARCH64_OPND_QLF_X
2650 	      && opnd->shifter.kind != AARCH64_MOD_LSL
2651 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
2652 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
2653 	    {
2654 	      set_other_error (mismatch_detail, idx, _("W register expected"));
2655 	      return 0;
2656 	    }
2657 	  break;
2658 
2659 	case AARCH64_OPND_Rm_SFT:
2660 	  /* ROR is not available to the shifted register operand in
2661 	     arithmetic instructions.  */
2662 	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
2663 	    {
2664 	      set_other_error (mismatch_detail, idx,
2665 			       _("shift operator expected"));
2666 	      return 0;
2667 	    }
2668 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
2669 	      && opcode->iclass != log_shift)
2670 	    {
2671 	      set_other_error (mismatch_detail, idx,
2672 			       _("'ROR' operator not allowed"));
2673 	      return 0;
2674 	    }
2675 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2676 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
2677 	    {
2678 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2679 	      return 0;
2680 	    }
2681 	  break;
2682 
2683 	default:
2684 	  break;
2685 	}
2686       break;
2687 
2688     default:
2689       break;
2690     }
2691 
2692   return 1;
2693 }
2694 
2695 /* Main entrypoint for the operand constraint checking.
2696 
2697    Return 1 if operands of *INST meet the constraint applied by the operand
2698    codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2699    not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2700    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2701    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2702    error kind when it is notified that an instruction does not pass the check).
2703 
2704    Un-determined operand qualifiers may get established during the process.  */
2705 
2706 int
2707 aarch64_match_operands_constraint (aarch64_inst *inst,
2708 				   aarch64_operand_error *mismatch_detail)
2709 {
2710   int i;
2711 
2712   DEBUG_TRACE ("enter");
2713 
2714   /* Check for cases where a source register needs to be the same as the
2715      destination register.  Do this before matching qualifiers since if
2716      an instruction has both invalid tying and invalid qualifiers,
2717      the error about qualifiers would suggest several alternative
2718      instructions that also have invalid tying.  */
2719   i = inst->opcode->tied_operand;
2720   if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2721     {
2722       if (mismatch_detail)
2723 	{
2724 	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2725 	  mismatch_detail->index = i;
2726 	  mismatch_detail->error = NULL;
2727 	}
2728       return 0;
2729     }
2730 
2731   /* Match operands' qualifiers.
2732      *INST has already had qualifiers established for some, if not all, of
2733      its operands; we need to find out whether these established
2734      qualifiers match one of the qualifier sequences in
2735      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2736      with the corresponding qualifier in such a sequence.
2737      Only basic operand constraint checking is done here; the more thorough
2738      constraint checking will be carried out by operand_general_constraint_met_p,
2739      which has to be called after this in order to get all of the operands'
2740      qualifiers established.  */
2741   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2742     {
2743       DEBUG_TRACE ("FAIL on operand qualifier matching");
2744       if (mismatch_detail)
2745 	{
2746 	  /* Return an error type to indicate that it is a qualifier
2747 	     matching failure; we don't care about which operand as there
2748 	     is enough information in the opcode table to reproduce it.  */
2749 	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2750 	  mismatch_detail->index = -1;
2751 	  mismatch_detail->error = NULL;
2752 	}
2753       return 0;
2754     }
2755 
2756   /* Match operands' constraint.  */
2757   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2758     {
2759       enum aarch64_opnd type = inst->opcode->operands[i];
2760       if (type == AARCH64_OPND_NIL)
2761 	break;
2762       if (inst->operands[i].skip)
2763 	{
2764 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2765 	  continue;
2766 	}
2767       if (operand_general_constraint_met_p (inst->operands, i, type,
2768 					    inst->opcode, mismatch_detail) == 0)
2769 	{
2770 	  DEBUG_TRACE ("FAIL on operand %d", i);
2771 	  return 0;
2772 	}
2773     }
2774 
2775   DEBUG_TRACE ("PASS");
2776 
2777   return 1;
2778 }
2779 
2780 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2781    Also updates the TYPE of each INST->OPERANDS with the corresponding
2782    value of OPCODE->OPERANDS.
2783 
2784    Note that some operand qualifiers may need to be manually cleared by
2785    the caller before it further calls aarch64_opcode_encode; by doing
2786    this, it helps the qualifier matching facilities work
2787    properly.  */
2788 
2789 const aarch64_opcode*
2790 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2791 {
2792   int i;
2793   const aarch64_opcode *old = inst->opcode;
2794 
2795   inst->opcode = opcode;
2796 
2797   /* Update the operand types.  */
2798   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2799     {
2800       inst->operands[i].type = opcode->operands[i];
2801       if (opcode->operands[i] == AARCH64_OPND_NIL)
2802 	break;
2803     }
2804 
2805   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2806 
2807   return old;
2808 }
2809 
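/* Return the index of OPERAND in the OPERANDS array (terminated by
   AARCH64_OPND_NIL), or -1 if it is not found.  */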
2810 int
2811 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2812 {
2813   int i;
2814   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2815     if (operands[i] == operand)
2816       return i;
2817     else if (operands[i] == AARCH64_OPND_NIL)
2818       break;
2819   return -1;
2820 }
2821 
2822 /* R0...R30, followed by FOR31.  */
2823 #define BANK(R, FOR31) \
2824   { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
2825     R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2826     R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2827     R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
2828 /* [0][0]  32-bit integer regs with sp   Wn
2829    [0][1]  64-bit integer regs with sp   Xn  sf=1
2830    [1][0]  32-bit integer regs with #0   Wn
2831    [1][1]  64-bit integer regs with #0   Xn  sf=1 */
2832 static const char *int_reg[2][2][32] = {
2833 #define R32(X) "w" #X
2834 #define R64(X) "x" #X
2835   { BANK (R32, "wsp"), BANK (R64, "sp") },
2836   { BANK (R32, "wzr"), BANK (R64, "xzr") }
2837 #undef R64
2838 #undef R32
2839 };
2840 
2841 /* Names of the SVE vector registers, first with .S suffixes,
2842    then with .D suffixes.  */
2843 
2844 static const char *sve_reg[2][32] = {
2845 #define ZS(X) "z" #X ".s"
2846 #define ZD(X) "z" #X ".d"
2847   BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2848 #undef ZD
2849 #undef ZS
2850 };
2851 #undef BANK
2852 
2853 /* Return the integer register name.
2854    If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2855 
2856 static inline const char *
2857 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2858 {
2859   const int has_zr = sp_reg_p ? 0 : 1;
2860   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2861   return int_reg[has_zr][is_64][regno];
2862 }
2863 
2864 /* Like get_int_reg_name, but IS_64 is always 1.  */
2865 
2866 static inline const char *
2867 get_64bit_int_reg_name (int regno, int sp_reg_p)
2868 {
2869   const int has_zr = sp_reg_p ? 0 : 1;
2870   return int_reg[has_zr][1][regno];
2871 }
2872 
2873 /* Get the name of the integer offset register in OPND, using the shift type
2874    to decide whether it's a word or doubleword.  */
2875 
2876 static inline const char *
2877 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2878 {
2879   switch (opnd->shifter.kind)
2880     {
2881     case AARCH64_MOD_UXTW:
2882     case AARCH64_MOD_SXTW:
2883       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2884 
2885     case AARCH64_MOD_LSL:
2886     case AARCH64_MOD_SXTX:
2887       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2888 
2889     default:
2890       abort ();
2891     }
2892 }
2893 
2894 /* Get the name of the SVE vector offset register in OPND, using the operand
2895    qualifier to decide whether the suffix should be .S or .D.  */
2896 
2897 static inline const char *
2898 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2899 {
2900   assert (qualifier == AARCH64_OPND_QLF_S_S
2901 	  || qualifier == AARCH64_OPND_QLF_S_D);
2902   return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2903 }
2904 
2905 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
2906 
2907 typedef union
2908 {
2909   uint64_t i;
2910   double   d;
2911 } double_conv_t;
2912 
2913 typedef union
2914 {
2915   uint32_t i;
2916   float    f;
2917 } single_conv_t;
2918 
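/* Half-precision immediates are expanded to single precision before they are
   printed (see expand_fp_imm), so the 32-bit single layout is reused here.  */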
2919 typedef union
2920 {
2921   uint32_t i;
2922   float    f;
2923 } half_conv_t;
2924 
2925 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2926    normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2927    (depending on the type of the instruction).  IMM8 will be expanded to a
2928    single-precision floating-point value (SIZE == 4) or a double-precision
2929    floating-point value (SIZE == 8).  A half-precision floating-point value
2930    (SIZE == 2) is expanded to a single-precision floating-point value.  The
2931    expanded value is returned.  */
2932 
2933 static uint64_t
2934 expand_fp_imm (int size, uint32_t imm8)
2935 {
2936   uint64_t imm = 0;
2937   uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2938 
2939   imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
2940   imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
2941   imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
2942   imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2943     | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
2944   if (size == 8)
2945     {
2946       imm = (imm8_7 << (63-32))		/* imm8<7>  */
2947 	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
2948 	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2949 	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2950 	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
2951       imm <<= 32;
2952     }
2953   else if (size == 4 || size == 2)
2954     {
2955       imm = (imm8_7 << 31)	/* imm8<7>              */
2956 	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
2957 	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
2958 	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
2959     }
2960   else
2961     {
2962       /* An unsupported size.  */
2963       assert (0);
2964     }
2965 
2966   return imm;
2967 }
2968 
2969 /* Produce the string representation of the register list operand *OPND
2970    in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
2971    the register name that comes before the register number, such as "v".  */
2972 static void
2973 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2974 		     const char *prefix)
2975 {
2976   const int num_regs = opnd->reglist.num_regs;
2977   const int first_reg = opnd->reglist.first_regno;
2978   const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2979   const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2980   char tb[8];	/* Temporary buffer.  */
2981 
2982   assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2983   assert (num_regs >= 1 && num_regs <= 4);
2984 
2985   /* Prepare the index if any.  */
2986   if (opnd->reglist.has_index)
2987     /* PR 21096: The %100 is to silence a warning about possible truncation.  */
2988     snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2989   else
2990     tb[0] = '\0';
2991 
2992   /* The hyphenated form is preferred for disassembly if there are
2993      more than two registers in the list, and the register numbers
2994      are monotonically increasing in increments of one.  */
2995   if (num_regs > 2 && last_reg > first_reg)
2996     snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2997 	      prefix, last_reg, qlf_name, tb);
2998   else
2999     {
3000       const int reg0 = first_reg;
3001       const int reg1 = (first_reg + 1) & 0x1f;
3002       const int reg2 = (first_reg + 2) & 0x1f;
3003       const int reg3 = (first_reg + 3) & 0x1f;
3004 
3005       switch (num_regs)
3006 	{
3007 	case 1:
3008 	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3009 	  break;
3010 	case 2:
3011 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3012 		    prefix, reg1, qlf_name, tb);
3013 	  break;
3014 	case 3:
3015 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3016 		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3017 		    prefix, reg2, qlf_name, tb);
3018 	  break;
3019 	case 4:
3020 	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3021 		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3022 		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3023 	  break;
3024 	}
3025     }
3026 }
3027 
3028 /* Print the register+immediate address in OPND to BUF, which has SIZE
3029    characters.  BASE is the name of the base register.  */
3030 
3031 static void
3032 print_immediate_offset_address (char *buf, size_t size,
3033 				const aarch64_opnd_info *opnd,
3034 				const char *base)
3035 {
3036   if (opnd->addr.writeback)
3037     {
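      /* Pre-indexed addressing prints as [base, #imm]! and post-indexed
	 addressing as [base], #imm.  */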
3038       if (opnd->addr.preind)
3039 	snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3040       else
3041 	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3042     }
3043   else
3044     {
3045       if (opnd->shifter.operator_present)
3046 	{
3047 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3048 	  snprintf (buf, size, "[%s, #%d, mul vl]",
3049 		    base, opnd->addr.offset.imm);
3050 	}
3051       else if (opnd->addr.offset.imm)
3052 	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3053       else
3054 	snprintf (buf, size, "[%s]", base);
3055     }
3056 }
3057 
3058 /* Produce the string representation of the register offset address operand
3059    *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
3060    the names of the base and offset registers.  */
3061 static void
3062 print_register_offset_address (char *buf, size_t size,
3063 			       const aarch64_opnd_info *opnd,
3064 			       const char *base, const char *offset)
3065 {
3066   char tb[16];			/* Temporary buffer.  */
3067   bfd_boolean print_extend_p = TRUE;
3068   bfd_boolean print_amount_p = TRUE;
3069   const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3070 
3071   if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3072 				|| !opnd->shifter.amount_present))
3073     {
3074       /* Do not print the shift/extend amount when the amount is zero and
3075          it is not the special case of an 8-bit load/store instruction.  */
3076       print_amount_p = FALSE;
3077       /* Likewise, no need to print the shift operator LSL in such a
3078 	 situation.  */
3079       if (opnd->shifter.kind == AARCH64_MOD_LSL)
3080 	print_extend_p = FALSE;
3081     }
3082 
3083   /* Prepare for the extend/shift.  */
3084   if (print_extend_p)
3085     {
3086       if (print_amount_p)
3087 	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3088   /* PR 21096: The %100 is to silence a warning about possible truncation.  */
3089 		  (opnd->shifter.amount % 100));
3090       else
3091 	snprintf (tb, sizeof (tb), ", %s", shift_name);
3092     }
3093   else
3094     tb[0] = '\0';
3095 
3096   snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3097 }
3098 
3099 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3100    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
3101    PC, PCREL_P and ADDRESS are used to pass in and return information about
3102    the PC-relative address calculation, where the PC value is passed in
3103    PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3104    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3105    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3106 
3107    The function serves both the disassembler and the assembler diagnostics
3108    issuer, which is the reason why it lives in this file.  */
3109 
3110 void
3111 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3112 		       const aarch64_opcode *opcode,
3113 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3114 		       bfd_vma *address, char** notes)
3115 {
3116   unsigned int i, num_conds;
3117   const char *name = NULL;
3118   const aarch64_opnd_info *opnd = opnds + idx;
3119   enum aarch64_modifier_kind kind;
3120   uint64_t addr, enum_value;
3121 
3122   buf[0] = '\0';
3123   if (pcrel_p)
3124     *pcrel_p = 0;
3125 
3126   switch (opnd->type)
3127     {
3128     case AARCH64_OPND_Rd:
3129     case AARCH64_OPND_Rn:
3130     case AARCH64_OPND_Rm:
3131     case AARCH64_OPND_Rt:
3132     case AARCH64_OPND_Rt2:
3133     case AARCH64_OPND_Rs:
3134     case AARCH64_OPND_Ra:
3135     case AARCH64_OPND_Rt_SYS:
3136     case AARCH64_OPND_PAIRREG:
3137     case AARCH64_OPND_SVE_Rm:
3138       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3139 	 the <ic_op>, therefore we use opnd->present to override the
3140 	 generic optional-ness information.  */
3141       if (opnd->type == AARCH64_OPND_Rt_SYS)
3142 	{
3143 	  if (!opnd->present)
3144 	    break;
3145 	}
3146       /* Omit the operand, e.g. RET.  */
3147       else if (optional_operand_p (opcode, idx)
3148 	       && (opnd->reg.regno
3149 		   == get_optional_operand_default_value (opcode)))
3150 	break;
3151       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3152 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3153       snprintf (buf, size, "%s",
3154 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3155       break;
3156 
3157     case AARCH64_OPND_Rd_SP:
3158     case AARCH64_OPND_Rn_SP:
3159     case AARCH64_OPND_SVE_Rn_SP:
3160     case AARCH64_OPND_Rm_SP:
3161       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3162 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
3163 	      || opnd->qualifier == AARCH64_OPND_QLF_X
3164 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
3165       snprintf (buf, size, "%s",
3166 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3167       break;
3168 
3169     case AARCH64_OPND_Rm_EXT:
3170       kind = opnd->shifter.kind;
3171       assert (idx == 1 || idx == 2);
3172       if ((aarch64_stack_pointer_p (opnds)
3173 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3174 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
3175 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
3176 	       && kind == AARCH64_MOD_UXTW)
3177 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
3178 		  && kind == AARCH64_MOD_UXTX)))
3179 	{
3180 	  /* 'LSL' is the preferred form in this case.  */
3181 	  kind = AARCH64_MOD_LSL;
3182 	  if (opnd->shifter.amount == 0)
3183 	    {
3184 	      /* Shifter omitted.  */
3185 	      snprintf (buf, size, "%s",
3186 			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3187 	      break;
3188 	    }
3189 	}
3190       if (opnd->shifter.amount)
3191 	snprintf (buf, size, "%s, %s #%" PRIi64,
3192 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3193 		  aarch64_operand_modifiers[kind].name,
3194 		  opnd->shifter.amount);
3195       else
3196 	snprintf (buf, size, "%s, %s",
3197 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3198 		  aarch64_operand_modifiers[kind].name);
3199       break;
3200 
3201     case AARCH64_OPND_Rm_SFT:
3202       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3203 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3204       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3205 	snprintf (buf, size, "%s",
3206 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3207       else
3208 	snprintf (buf, size, "%s, %s #%" PRIi64,
3209 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3210 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3211 		  opnd->shifter.amount);
3212       break;
3213 
3214     case AARCH64_OPND_Fd:
3215     case AARCH64_OPND_Fn:
3216     case AARCH64_OPND_Fm:
3217     case AARCH64_OPND_Fa:
3218     case AARCH64_OPND_Ft:
3219     case AARCH64_OPND_Ft2:
3220     case AARCH64_OPND_Sd:
3221     case AARCH64_OPND_Sn:
3222     case AARCH64_OPND_Sm:
3223     case AARCH64_OPND_SVE_VZn:
3224     case AARCH64_OPND_SVE_Vd:
3225     case AARCH64_OPND_SVE_Vm:
3226     case AARCH64_OPND_SVE_Vn:
3227       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3228 		opnd->reg.regno);
3229       break;
3230 
3231     case AARCH64_OPND_Va:
3232     case AARCH64_OPND_Vd:
3233     case AARCH64_OPND_Vn:
3234     case AARCH64_OPND_Vm:
3235       snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3236 		aarch64_get_qualifier_name (opnd->qualifier));
3237       break;
3238 
3239     case AARCH64_OPND_Ed:
3240     case AARCH64_OPND_En:
3241     case AARCH64_OPND_Em:
3242     case AARCH64_OPND_Em16:
3243     case AARCH64_OPND_SM3_IMM2:
3244       snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3245 		aarch64_get_qualifier_name (opnd->qualifier),
3246 		opnd->reglane.index);
3247       break;
3248 
3249     case AARCH64_OPND_VdD1:
3250     case AARCH64_OPND_VnD1:
3251       snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3252       break;
3253 
3254     case AARCH64_OPND_LVn:
3255     case AARCH64_OPND_LVt:
3256     case AARCH64_OPND_LVt_AL:
3257     case AARCH64_OPND_LEt:
3258       print_register_list (buf, size, opnd, "v");
3259       break;
3260 
3261     case AARCH64_OPND_SVE_Pd:
3262     case AARCH64_OPND_SVE_Pg3:
3263     case AARCH64_OPND_SVE_Pg4_5:
3264     case AARCH64_OPND_SVE_Pg4_10:
3265     case AARCH64_OPND_SVE_Pg4_16:
3266     case AARCH64_OPND_SVE_Pm:
3267     case AARCH64_OPND_SVE_Pn:
3268     case AARCH64_OPND_SVE_Pt:
3269       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3270 	snprintf (buf, size, "p%d", opnd->reg.regno);
3271       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3272 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3273 	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3274 		  aarch64_get_qualifier_name (opnd->qualifier));
3275       else
3276 	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3277 		  aarch64_get_qualifier_name (opnd->qualifier));
3278       break;
3279 
3280     case AARCH64_OPND_SVE_Za_5:
3281     case AARCH64_OPND_SVE_Za_16:
3282     case AARCH64_OPND_SVE_Zd:
3283     case AARCH64_OPND_SVE_Zm_5:
3284     case AARCH64_OPND_SVE_Zm_16:
3285     case AARCH64_OPND_SVE_Zn:
3286     case AARCH64_OPND_SVE_Zt:
3287       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3288 	snprintf (buf, size, "z%d", opnd->reg.regno);
3289       else
3290 	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3291 		  aarch64_get_qualifier_name (opnd->qualifier));
3292       break;
3293 
3294     case AARCH64_OPND_SVE_ZnxN:
3295     case AARCH64_OPND_SVE_ZtxN:
3296       print_register_list (buf, size, opnd, "z");
3297       break;
3298 
3299     case AARCH64_OPND_SVE_Zm3_INDEX:
3300     case AARCH64_OPND_SVE_Zm3_22_INDEX:
3301     case AARCH64_OPND_SVE_Zm4_INDEX:
3302     case AARCH64_OPND_SVE_Zn_INDEX:
3303       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3304 		aarch64_get_qualifier_name (opnd->qualifier),
3305 		opnd->reglane.index);
3306       break;
3307 
3308     case AARCH64_OPND_CRn:
3309     case AARCH64_OPND_CRm:
3310       snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3311       break;
3312 
3313     case AARCH64_OPND_IDX:
3314     case AARCH64_OPND_MASK:
3315     case AARCH64_OPND_IMM:
3316     case AARCH64_OPND_IMM_2:
3317     case AARCH64_OPND_WIDTH:
3318     case AARCH64_OPND_UIMM3_OP1:
3319     case AARCH64_OPND_UIMM3_OP2:
3320     case AARCH64_OPND_BIT_NUM:
3321     case AARCH64_OPND_IMM_VLSL:
3322     case AARCH64_OPND_IMM_VLSR:
3323     case AARCH64_OPND_SHLL_IMM:
3324     case AARCH64_OPND_IMM0:
3325     case AARCH64_OPND_IMMR:
3326     case AARCH64_OPND_IMMS:
3327     case AARCH64_OPND_FBITS:
3328     case AARCH64_OPND_SIMM5:
3329     case AARCH64_OPND_SVE_SHLIMM_PRED:
3330     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3331     case AARCH64_OPND_SVE_SHRIMM_PRED:
3332     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3333     case AARCH64_OPND_SVE_SIMM5:
3334     case AARCH64_OPND_SVE_SIMM5B:
3335     case AARCH64_OPND_SVE_SIMM6:
3336     case AARCH64_OPND_SVE_SIMM8:
3337     case AARCH64_OPND_SVE_UIMM3:
3338     case AARCH64_OPND_SVE_UIMM7:
3339     case AARCH64_OPND_SVE_UIMM8:
3340     case AARCH64_OPND_SVE_UIMM8_53:
3341     case AARCH64_OPND_IMM_ROT1:
3342     case AARCH64_OPND_IMM_ROT2:
3343     case AARCH64_OPND_IMM_ROT3:
3344     case AARCH64_OPND_SVE_IMM_ROT1:
3345     case AARCH64_OPND_SVE_IMM_ROT2:
3346       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3347       break;
3348 
3349     case AARCH64_OPND_SVE_I1_HALF_ONE:
3350     case AARCH64_OPND_SVE_I1_HALF_TWO:
3351     case AARCH64_OPND_SVE_I1_ZERO_ONE:
3352       {
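	/* The immediate holds the IEEE single-precision bit pattern of the
	   selected value, so e.g. 0x3f000000 prints as #0.5 and 0x3f800000
	   prints as #1.0.  */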
3353 	single_conv_t c;
3354 	c.i = opnd->imm.value;
3355 	snprintf (buf, size, "#%.1f", c.f);
3356 	break;
3357       }
3358 
3359     case AARCH64_OPND_SVE_PATTERN:
3360       if (optional_operand_p (opcode, idx)
3361 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3362 	break;
3363       enum_value = opnd->imm.value;
3364       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3365       if (aarch64_sve_pattern_array[enum_value])
3366 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3367       else
3368 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3369       break;
3370 
3371     case AARCH64_OPND_SVE_PATTERN_SCALED:
3372       if (optional_operand_p (opcode, idx)
3373 	  && !opnd->shifter.operator_present
3374 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3375 	break;
3376       enum_value = opnd->imm.value;
3377       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3378       if (aarch64_sve_pattern_array[enum_value])
3379 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3380       else
3381 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3382       if (opnd->shifter.operator_present)
3383 	{
3384 	  size_t len = strlen (buf);
3385 	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
3386 		    aarch64_operand_modifiers[opnd->shifter.kind].name,
3387 		    opnd->shifter.amount);
3388 	}
3389       break;
3390 
3391     case AARCH64_OPND_SVE_PRFOP:
3392       enum_value = opnd->imm.value;
3393       assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3394       if (aarch64_sve_prfop_array[enum_value])
3395 	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3396       else
3397 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3398       break;
3399 
3400     case AARCH64_OPND_IMM_MOV:
3401       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3402 	{
3403 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
3404 	    {
3405 	      int imm32 = opnd->imm.value;
3406 	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3407 	    }
3408 	  break;
3409 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
3410 	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3411 		    opnd->imm.value, opnd->imm.value);
3412 	  break;
3413 	default: assert (0);
3414 	}
3415       break;
3416 
3417     case AARCH64_OPND_FPIMM0:
3418       snprintf (buf, size, "#0.0");
3419       break;
3420 
3421     case AARCH64_OPND_LIMM:
3422     case AARCH64_OPND_AIMM:
3423     case AARCH64_OPND_HALF:
3424     case AARCH64_OPND_SVE_INV_LIMM:
3425     case AARCH64_OPND_SVE_LIMM:
3426     case AARCH64_OPND_SVE_LIMM_MOV:
3427       if (opnd->shifter.amount)
3428 	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3429 		  opnd->shifter.amount);
3430       else
3431 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3432       break;
3433 
3434     case AARCH64_OPND_SIMD_IMM:
3435     case AARCH64_OPND_SIMD_IMM_SFT:
3436       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3437 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
3438 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3439       else
3440 	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3441 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3442 		  opnd->shifter.amount);
3443       break;
3444 
3445     case AARCH64_OPND_SVE_AIMM:
3446     case AARCH64_OPND_SVE_ASIMM:
3447       if (opnd->shifter.amount)
3448 	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3449 		  opnd->shifter.amount);
3450       else
3451 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3452       break;
3453 
3454     case AARCH64_OPND_FPIMM:
3455     case AARCH64_OPND_SIMD_FPIMM:
3456     case AARCH64_OPND_SVE_FPIMM8:
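      /* opnd->imm.value still holds the 8-bit encoded immediate; expand_fp_imm
	 widens it to the IEEE bit pattern of the element size (following the
	 architectural FPExpandImm rules), so e.g. an encoding of 0x70 becomes
	 0x3f800000 (1.0) for a 4-byte (single precision) element.  */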
3457       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3458 	{
3459 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
3460 	    {
3461 	      half_conv_t c;
3462 	      c.i = expand_fp_imm (2, opnd->imm.value);
3463 	      snprintf (buf, size,  "#%.18e", c.f);
3464 	    }
3465 	  break;
3466 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
3467 	    {
3468 	      single_conv_t c;
3469 	      c.i = expand_fp_imm (4, opnd->imm.value);
3470 	      snprintf (buf, size,  "#%.18e", c.f);
3471 	    }
3472 	  break;
3473 	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
3474 	    {
3475 	      double_conv_t c;
3476 	      c.i = expand_fp_imm (8, opnd->imm.value);
3477 	      snprintf (buf, size,  "#%.18e", c.d);
3478 	    }
3479 	  break;
3480 	default: assert (0);
3481 	}
3482       break;
3483 
3484     case AARCH64_OPND_CCMP_IMM:
3485     case AARCH64_OPND_NZCV:
3486     case AARCH64_OPND_EXCEPTION:
3487     case AARCH64_OPND_UIMM4:
3488     case AARCH64_OPND_UIMM4_ADDG:
3489     case AARCH64_OPND_UIMM7:
3490     case AARCH64_OPND_UIMM10:
3491       if (optional_operand_p (opcode, idx)
3492 	  && (opnd->imm.value ==
3493 	      (int64_t) get_optional_operand_default_value (opcode)))
3494 	/* Omit the operand, e.g. DCPS1.  */
3495 	break;
3496       snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3497       break;
3498 
3499     case AARCH64_OPND_COND:
3500     case AARCH64_OPND_COND1:
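      /* Print the primary condition name and list any alias spellings in a
	 trailing comment, e.g. "cs  // cs = hs" when the condition also has
	 the alias "hs".  */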
3501       snprintf (buf, size, "%s", opnd->cond->names[0]);
3502       num_conds = ARRAY_SIZE (opnd->cond->names);
3503       for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3504 	{
3505 	  size_t len = strlen (buf);
3506 	  if (i == 1)
3507 	    snprintf (buf + len, size - len, "  // %s = %s",
3508 		      opnd->cond->names[0], opnd->cond->names[i]);
3509 	  else
3510 	    snprintf (buf + len, size - len, ", %s",
3511 		      opnd->cond->names[i]);
3512 	}
3513       break;
3514 
3515     case AARCH64_OPND_ADDR_ADRP:
3516       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3517 	+ opnd->imm.value;
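      /* For instance, assuming AARCH64_PCREL_OFFSET is 0, an ADRP at pc
	 0x1004 whose decoded immediate is 0x2000 resolves to 0x3000: the
	 4KiB page base of the instruction plus the page-scaled offset.  */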
3518       if (pcrel_p)
3519 	*pcrel_p = 1;
3520       if (address)
3521 	*address = addr;
3522       /* This is not necessary during disassembly, as print_address_func
3523 	 in the disassemble_info will take care of the printing.  But some
3524 	 other callers may still be interested in getting the string in *STR,
3525 	 so we do the snprintf here regardless.  */
3526       snprintf (buf, size, "#0x%" PRIx64, addr);
3527       break;
3528 
3529     case AARCH64_OPND_ADDR_PCREL14:
3530     case AARCH64_OPND_ADDR_PCREL19:
3531     case AARCH64_OPND_ADDR_PCREL21:
3532     case AARCH64_OPND_ADDR_PCREL26:
3533       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3534       if (pcrel_p)
3535 	*pcrel_p = 1;
3536       if (address)
3537 	*address = addr;
3538       /* This is not necessary during disassembly, as print_address_func
3539 	 in the disassemble_info will take care of the printing.  But some
3540 	 other callers may still be interested in getting the string in *STR,
3541 	 so we do the snprintf here regardless.  */
3542       snprintf (buf, size, "#0x%" PRIx64, addr);
3543       break;
3544 
3545     case AARCH64_OPND_ADDR_SIMPLE:
3546     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3547     case AARCH64_OPND_SIMD_ADDR_POST:
3548       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3549       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3550 	{
3551 	  if (opnd->addr.offset.is_reg)
3552 	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3553 	  else
3554 	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3555 	}
3556       else
3557 	snprintf (buf, size, "[%s]", name);
3558       break;
3559 
3560     case AARCH64_OPND_ADDR_REGOFF:
3561     case AARCH64_OPND_SVE_ADDR_R:
3562     case AARCH64_OPND_SVE_ADDR_RR:
3563     case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3564     case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3565     case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3566     case AARCH64_OPND_SVE_ADDR_RX:
3567     case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3568     case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3569     case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3570       print_register_offset_address
3571 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3572 	 get_offset_int_reg_name (opnd));
3573       break;
3574 
3575     case AARCH64_OPND_SVE_ADDR_RZ:
3576     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3577     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3578     case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3579     case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3580     case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3581     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3582     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3583     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3584     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3585     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3586     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3587       print_register_offset_address
3588 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3589 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3590       break;
3591 
3592     case AARCH64_OPND_ADDR_SIMM7:
3593     case AARCH64_OPND_ADDR_SIMM9:
3594     case AARCH64_OPND_ADDR_SIMM9_2:
3595     case AARCH64_OPND_ADDR_SIMM10:
3596     case AARCH64_OPND_ADDR_SIMM11:
3597     case AARCH64_OPND_ADDR_SIMM13:
3598     case AARCH64_OPND_ADDR_OFFSET:
3599     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3600     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3601     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3602     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3603     case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3604     case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3605     case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3606     case AARCH64_OPND_SVE_ADDR_RI_U6:
3607     case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3608     case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3609     case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3610       print_immediate_offset_address
3611 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3612       break;
3613 
3614     case AARCH64_OPND_SVE_ADDR_ZI_U5:
3615     case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3616     case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3617     case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3618       print_immediate_offset_address
3619 	(buf, size, opnd,
3620 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3621       break;
3622 
3623     case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3624     case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3625     case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3626       print_register_offset_address
3627 	(buf, size, opnd,
3628 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3629 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3630       break;
3631 
3632     case AARCH64_OPND_ADDR_UIMM12:
3633       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3634       if (opnd->addr.offset.imm)
3635 	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3636       else
3637 	snprintf (buf, size, "[%s]", name);
3638       break;
3639 
3640     case AARCH64_OPND_SYSREG:
3641       for (i = 0; aarch64_sys_regs[i].name; ++i)
3642 	{
3643 	  bfd_boolean exact_match
3644 	    = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3645 	       == opnd->sysreg.flags;
3646 
3647 	  /* Try to find an exact match, but if that fails, return the first
3648 	     partial match that was found.  */
3649 	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
3650 	      && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3651 	      && (name == NULL || exact_match))
3652 	    {
3653 	      name = aarch64_sys_regs[i].name;
3654 	      if (exact_match)
3655 		{
3656 		  if (notes)
3657 		    *notes = NULL;
3658 		  break;
3659 		}
3660 
3661 	      /* If we didn't match exactly, the presence of a flag
3662 		 indicates what we didn't want for this instruction, e.g. if
3663 		 F_REG_READ is there, we were looking for a write
3664 		 register.  See aarch64_ext_sysreg.  */
3665 	      if (notes && (aarch64_sys_regs[i].flags & F_REG_WRITE))
3666 		*notes = _("reading from a write-only register");
3667 	      else if (notes && (aarch64_sys_regs[i].flags & F_REG_READ))
3668 		*notes = _("writing to a read-only register");
3669 	    }
3670 	}
3671 
3672       if (name)
3673 	snprintf (buf, size, "%s", name);
3674       else
3675 	{
3676 	  /* Implementation defined system register.  */
3677 	  unsigned int value = opnd->sysreg.value;
3678 	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3679 		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3680 		    value & 0x7);
3681 	}
3682       break;
3683 
3684     case AARCH64_OPND_PSTATEFIELD:
3685       for (i = 0; aarch64_pstatefields[i].name; ++i)
3686 	if (aarch64_pstatefields[i].value == opnd->pstatefield)
3687 	  break;
3688       assert (aarch64_pstatefields[i].name);
3689       snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3690       break;
3691 
3692     case AARCH64_OPND_SYSREG_AT:
3693     case AARCH64_OPND_SYSREG_DC:
3694     case AARCH64_OPND_SYSREG_IC:
3695     case AARCH64_OPND_SYSREG_TLBI:
3696     case AARCH64_OPND_SYSREG_SR:
3697       snprintf (buf, size, "%s", opnd->sysins_op->name);
3698       break;
3699 
3700     case AARCH64_OPND_BARRIER:
3701       snprintf (buf, size, "%s", opnd->barrier->name);
3702       break;
3703 
3704     case AARCH64_OPND_BARRIER_ISB:
3705       /* Operand can be omitted, e.g. in DCPS1.  */
3706       if (! optional_operand_p (opcode, idx)
3707 	  || (opnd->barrier->value
3708 	      != get_optional_operand_default_value (opcode)))
3709 	snprintf (buf, size, "#0x%x", opnd->barrier->value);
3710       break;
3711 
3712     case AARCH64_OPND_PRFOP:
3713       if (opnd->prfop->name != NULL)
3714 	snprintf (buf, size, "%s", opnd->prfop->name);
3715       else
3716 	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3717       break;
3718 
3719     case AARCH64_OPND_BARRIER_PSB:
3720     case AARCH64_OPND_BTI_TARGET:
3721       if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3722 	snprintf (buf, size, "%s", opnd->hint_option->name);
3723       break;
3724 
3725     default:
3726       assert (0);
3727     }
3728 }
3729 
3730 #define CPENC(op0,op1,crn,crm,op2) \
3731   ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3732   /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3733 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3734   /* for 3.9.10 System Instructions */
3735 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
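
/* For example, CPENC (3,3,C13,C0,2), the encoding used for tpidr_el0 below,
   evaluates to 0xde82: op0 ends up in bits [15:14], op1 in bits [13:11],
   CRn in bits [10:7], CRm in bits [6:3] and op2 in bits [2:0], which is the
   layout unpacked by the "s%u_%u_c%u_c%u_%u" fallback for
   implementation-defined system registers in the operand printer above.  */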
3736 
3737 #define C0  0
3738 #define C1  1
3739 #define C2  2
3740 #define C3  3
3741 #define C4  4
3742 #define C5  5
3743 #define C6  6
3744 #define C7  7
3745 #define C8  8
3746 #define C9  9
3747 #define C10 10
3748 #define C11 11
3749 #define C12 12
3750 #define C13 13
3751 #define C14 14
3752 #define C15 15
3753 
3754 /* TODO: there is one more issue that needs to be resolved:
3755    1. handle cpu-implementation-defined system registers.  */
3756 const aarch64_sys_reg aarch64_sys_regs [] =
3757 {
3758   { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
3759   { "spsr_el12",	CPEN_ (5, C0, 0), F_ARCHEXT },
3760   { "elr_el1",          CPEN_(0,C0,1),	0 },
3761   { "elr_el12",	CPEN_ (5, C0, 1), F_ARCHEXT },
3762   { "sp_el0",           CPEN_(0,C1,0),	0 },
3763   { "spsel",            CPEN_(0,C2,0),	0 },
3764   { "daif",             CPEN_(3,C2,1),	0 },
3765   { "currentel",        CPEN_(0,C2,2),	F_REG_READ }, /* RO */
3766   { "pan",		CPEN_(0,C2,3),	F_ARCHEXT },
3767   { "uao",		CPEN_ (0, C2, 4), F_ARCHEXT },
3768   { "nzcv",             CPEN_(3,C2,0),	0 },
3769   { "ssbs",		CPEN_(3,C2,6),  F_ARCHEXT },
3770   { "fpcr",             CPEN_(3,C4,0),	0 },
3771   { "fpsr",             CPEN_(3,C4,1),	0 },
3772   { "dspsr_el0",        CPEN_(3,C5,0),	0 },
3773   { "dlr_el0",          CPEN_(3,C5,1),	0 },
3774   { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
3775   { "elr_el2",          CPEN_(4,C0,1),	0 },
3776   { "sp_el1",           CPEN_(4,C1,0),	0 },
3777   { "spsr_irq",         CPEN_(4,C3,0),	0 },
3778   { "spsr_abt",         CPEN_(4,C3,1),	0 },
3779   { "spsr_und",         CPEN_(4,C3,2),	0 },
3780   { "spsr_fiq",         CPEN_(4,C3,3),	0 },
3781   { "spsr_el3",         CPEN_(6,C0,0),	0 },
3782   { "elr_el3",          CPEN_(6,C0,1),	0 },
3783   { "sp_el2",           CPEN_(6,C1,0),	0 },
3784   { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
3785   { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
3786   { "midr_el1",         CPENC(3,0,C0,C0,0),	F_REG_READ }, /* RO */
3787   { "ctr_el0",          CPENC(3,3,C0,C0,1),	F_REG_READ }, /* RO */
3788   { "mpidr_el1",        CPENC(3,0,C0,C0,5),	F_REG_READ }, /* RO */
3789   { "revidr_el1",       CPENC(3,0,C0,C0,6),	F_REG_READ }, /* RO */
3790   { "aidr_el1",         CPENC(3,1,C0,C0,7),	F_REG_READ }, /* RO */
3791   { "dczid_el0",        CPENC(3,3,C0,C0,7),	F_REG_READ }, /* RO */
3792   { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	F_REG_READ }, /* RO */
3793   { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	F_REG_READ }, /* RO */
3794   { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	F_REG_READ }, /* RO */
3795   { "id_pfr2_el1",      CPENC(3,0,C0,C3,4),	F_ARCHEXT | F_REG_READ}, /* RO */
3796   { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	F_REG_READ }, /* RO */
3797   { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	F_REG_READ }, /* RO */
3798   { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	F_REG_READ }, /* RO */
3799   { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	F_REG_READ }, /* RO */
3800   { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	F_REG_READ }, /* RO */
3801   { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),	F_REG_READ }, /* RO */
3802   { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	F_REG_READ }, /* RO */
3803   { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	F_REG_READ }, /* RO */
3804   { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	F_REG_READ }, /* RO */
3805   { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	F_REG_READ }, /* RO */
3806   { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	F_REG_READ }, /* RO */
3807   { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	F_REG_READ }, /* RO */
3808   { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	F_REG_READ }, /* RO */
3809   { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	F_REG_READ }, /* RO */
3810   { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	F_REG_READ }, /* RO */
3811   { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	F_REG_READ }, /* RO */
3812   { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	F_REG_READ }, /* RO */
3813   { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	F_REG_READ }, /* RO */
3814   { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	F_REG_READ }, /* RO */
3815   { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	F_REG_READ }, /* RO */
3816   { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	F_REG_READ }, /* RO */
3817   { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	F_REG_READ }, /* RO */
3818   { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	F_REG_READ }, /* RO */
3819   { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	F_REG_READ }, /* RO */
3820   { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3821   { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	F_REG_READ }, /* RO */
3822   { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	F_REG_READ }, /* RO */
3823   { "id_aa64zfr0_el1",  CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3824   { "clidr_el1",        CPENC(3,1,C0,C0,1),	F_REG_READ }, /* RO */
3825   { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 },
3826   { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
3827   { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
3828   { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
3829   { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
3830   { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
3831   { "sctlr_el12",	CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3832   { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
3833   { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
3834   { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
3835   { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
3836   { "cpacr_el12",	CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3837   { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
3838   { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
3839   { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
3840   { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
3841   { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
3842   { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
3843   { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
3844   { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
3845   { "zcr_el1",          CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3846   { "zcr_el12",         CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3847   { "zcr_el2",          CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3848   { "zcr_el3",          CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3849   { "zidr_el1",         CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3850   { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
3851   { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
3852   { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
3853   { "ttbr1_el2",	CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3854   { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
3855   { "ttbr0_el12",	CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3856   { "ttbr1_el12",	CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3857   { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
3858   { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
3859   { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
3860   { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
3861   { "tcr_el12",		CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3862   { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
3863   { "apiakeylo_el1",	CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3864   { "apiakeyhi_el1",	CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3865   { "apibkeylo_el1",	CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3866   { "apibkeyhi_el1",	CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3867   { "apdakeylo_el1",	CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3868   { "apdakeyhi_el1",	CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3869   { "apdbkeylo_el1",	CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3870   { "apdbkeyhi_el1",	CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3871   { "apgakeylo_el1",	CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3872   { "apgakeyhi_el1",	CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3873   { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
3874   { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
3875   { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
3876   { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
3877   { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
3878   { "afsr0_el12",	CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3879   { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
3880   { "afsr1_el12",	CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3881   { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
3882   { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
3883   { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
3884   { "esr_el12",		CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3885   { "vsesr_el2",	CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3886   { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
3887   { "erridr_el1",	CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3888   { "errselr_el1",	CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3889   { "erxfr_el1",	CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3890   { "erxctlr_el1",	CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3891   { "erxstatus_el1",	CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3892   { "erxaddr_el1",	CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3893   { "erxmisc0_el1",	CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3894   { "erxmisc1_el1",	CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3895   { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
3896   { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
3897   { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
3898   { "far_el12",		CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3899   { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
3900   { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
3901   { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
3902   { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
3903   { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
3904   { "mair_el12",	CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3905   { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
3906   { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
3907   { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
3908   { "amair_el12",	CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3909   { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
3910   { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
3911   { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
3912   { "vbar_el12",	CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3913   { "rvbar_el1",        CPENC(3,0,C12,C0,1),	F_REG_READ }, /* RO */
3914   { "rvbar_el2",        CPENC(3,4,C12,C0,1),	F_REG_READ }, /* RO */
3915   { "rvbar_el3",        CPENC(3,6,C12,C0,1),	F_REG_READ }, /* RO */
3916   { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
3917   { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
3918   { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
3919   { "isr_el1",          CPENC(3,0,C12,C1,0),	F_REG_READ }, /* RO */
3920   { "disr_el1",		CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3921   { "vdisr_el2",	CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3922   { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
3923   { "contextidr_el2",	CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3924   { "contextidr_el12",	CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3925   { "rndr",		CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3926   { "rndrrs",		CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3927   { "tco",		CPENC(3,3,C4,C2,7), F_ARCHEXT },
3928   { "tfsre0_el1",	CPENC(3,0,C6,C6,1), F_ARCHEXT },
3929   { "tfsr_el1",		CPENC(3,0,C6,C5,0), F_ARCHEXT },
3930   { "tfsr_el2",		CPENC(3,4,C6,C5,0), F_ARCHEXT },
3931   { "tfsr_el3",		CPENC(3,6,C6,C6,0), F_ARCHEXT },
3932   { "tfsr_el12",	CPENC(3,5,C6,C6,0), F_ARCHEXT },
3933   { "rgsr_el1",		CPENC(3,0,C1,C0,5), F_ARCHEXT },
3934   { "gcr_el1",		CPENC(3,0,C1,C0,6), F_ARCHEXT },
3935   { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
3936   { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RW */
3937   { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
3938   { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
3939   { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
3940   { "scxtnum_el0",      CPENC(3,3,C13,C0,7), F_ARCHEXT },
3941   { "scxtnum_el1",      CPENC(3,0,C13,C0,7), F_ARCHEXT },
3942   { "scxtnum_el2",      CPENC(3,4,C13,C0,7), F_ARCHEXT },
3943   { "scxtnum_el12",     CPENC(3,5,C13,C0,7), F_ARCHEXT },
3944   { "scxtnum_el3",      CPENC(3,6,C13,C0,7), F_ARCHEXT },
3945   { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
3946   { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RW */
3947   { "cntpct_el0",       CPENC(3,3,C14,C0,1),	F_REG_READ }, /* RO */
3948   { "cntvct_el0",       CPENC(3,3,C14,C0,2),	F_REG_READ }, /* RO */
3949   { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
3950   { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
3951   { "cntkctl_el12",	CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3952   { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
3953   { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
3954   { "cntp_tval_el02",	CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3955   { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
3956   { "cntp_ctl_el02",	CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3957   { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
3958   { "cntp_cval_el02",	CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3959   { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
3960   { "cntv_tval_el02",	CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3961   { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
3962   { "cntv_ctl_el02",	CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3963   { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
3964   { "cntv_cval_el02",	CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3965   { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
3966   { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
3967   { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
3968   { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
3969   { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
3970   { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
3971   { "cnthv_tval_el2",	CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3972   { "cnthv_ctl_el2",	CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3973   { "cnthv_cval_el2",	CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3974   { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
3975   { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
3976   { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
3977   { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
3978   { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
3979   { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	F_REG_READ  },  /* r */
3980   { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
3981   { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
3982   { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	F_REG_READ  },  /* r */
3983   { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	F_REG_WRITE },  /* w */
3984   { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },
3985   { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },
3986   { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
3987   { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
3988   { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
3989   { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
3990   { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
3991   { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
3992   { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
3993   { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
3994   { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
3995   { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
3996   { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
3997   { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
3998   { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
3999   { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
4000   { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
4001   { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
4002   { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
4003   { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
4004   { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
4005   { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
4006   { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
4007   { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
4008   { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
4009   { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
4010   { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
4011   { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
4012   { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
4013   { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
4014   { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
4015   { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
4016   { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
4017   { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
4018   { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
4019   { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
4020   { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
4021   { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
4022   { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
4023   { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
4024   { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
4025   { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
4026   { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
4027   { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
4028   { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
4029   { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
4030   { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
4031   { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
4032   { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
4033   { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
4034   { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
4035   { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
4036   { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
4037   { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
4038   { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
4039   { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
4040   { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
4041   { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
4042   { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
4043   { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
4044   { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
4045   { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
4046   { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
4047   { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
4048   { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
4049   { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
4050   { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
4051   { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
4052   { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	F_REG_READ  },  /* r */
4053   { "oslar_el1",         CPENC(2,0,C1, C0, 4),	F_REG_WRITE },  /* w */
4054   { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	F_REG_READ  },  /* r */
4055   { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
4056   { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
4057   { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
4058   { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
4059   { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	F_REG_READ  },  /* r */
4060   { "pmblimitr_el1",	 CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
4061   { "pmbptr_el1",	 CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
4062   { "pmbsr_el1",	 CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
4063   { "pmbidr_el1",	 CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ },  /* ro */
4064   { "pmscr_el1",	 CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
4065   { "pmsicr_el1",	 CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
4066   { "pmsirr_el1",	 CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
4067   { "pmsfcr_el1",	 CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
4068   { "pmsevfr_el1",	 CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
4069   { "pmslatfr_el1",	 CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
4070   { "pmsidr_el1",	 CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* rw */
4071   { "pmscr_el2",	 CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
4072   { "pmscr_el12",	 CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
4073   { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
4074   { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
4075   { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
4076   { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
4077   { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	F_REG_WRITE },  /* w */
4078   { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
4079   { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	F_REG_READ  },  /* r */
4080   { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	F_REG_READ  },  /* r */
4081   { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
4082   { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
4083   { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
4084   { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
4085   { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
4086   { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
4087   { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
4088   { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
4089   { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
4090   { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
4091   { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
4092   { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
4093   { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
4094   { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
4095   { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
4096   { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
4097   { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
4098   { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
4099   { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
4100   { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
4101   { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
4102   { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
4103   { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
4104   { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
4105   { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
4106   { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
4107   { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
4108   { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
4109   { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
4110   { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
4111   { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
4112   { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
4113   { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
4114   { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
4115   { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
4116   { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
4117   { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
4118   { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
4119   { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
4120   { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
4121   { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
4122   { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
4123   { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
4124   { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
4125   { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
4126   { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
4127   { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
4128   { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
4129   { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
4130   { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
4131   { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
4132   { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
4133   { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
4134   { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
4135   { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
4136   { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
4137   { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
4138   { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
4139   { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
4140   { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
4141   { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
4142   { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
4143   { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
4144   { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
4145   { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
4146   { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
4147   { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
4148   { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
4149   { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
4150   { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
4151 
4152   { "dit",		 CPEN_ (3, C2, 5), F_ARCHEXT },
4153   { "vstcr_el2",	 CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4154   { "vsttbr_el2",	 CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4155   { "cnthvs_tval_el2",	 CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4156   { "cnthvs_cval_el2",	 CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4157   { "cnthvs_ctl_el2",	 CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4158   { "cnthps_tval_el2",	 CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4159   { "cnthps_cval_el2",	 CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4160   { "cnthps_ctl_el2",	 CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4161   { "sder32_el2",	 CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4162   { "vncr_el2",		 CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4163   { 0,          CPENC(0,0,0,0,0),	0 },
4164 };
4165 
4166 bfd_boolean
4167 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4168 {
4169   return (reg->flags & F_DEPRECATED) != 0;
4170 }
4171 
4172 bfd_boolean
4173 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4174 			     const aarch64_sys_reg *reg)
4175 {
4176   if (!(reg->flags & F_ARCHEXT))
4177     return TRUE;
4178 
4179   /* PAN.  Values are from aarch64_sys_regs.  */
4180   if (reg->value == CPEN_(0,C2,3)
4181       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4182     return FALSE;
4183 
4184   /* SCXTNUM_ELx registers.  */
4185   if ((reg->value == CPENC (3, 3, C13, C0, 7)
4186        || reg->value == CPENC (3, 0, C13, C0, 7)
4187        || reg->value == CPENC (3, 4, C13, C0, 7)
4188        || reg->value == CPENC (3, 6, C13, C0, 7)
4189        || reg->value == CPENC (3, 5, C13, C0, 7))
4190       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
4191     return FALSE;
4192 
4193   /* ID_PFR2_EL1 register.  */
4194   if (reg->value == CPENC(3, 0, C0, C3, 4)
4195       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4196     return FALSE;
4197 
4198   /* SSBS.  Values are from aarch64_sys_regs.  */
4199   if (reg->value == CPEN_(3,C2,6)
4200       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4201     return FALSE;
4202 
4203   /* Virtualization host extensions: system registers.  */
4204   if ((reg->value == CPENC (3, 4, C2, C0, 1)
4205        || reg->value == CPENC (3, 4, C13, C0, 1)
4206        || reg->value == CPENC (3, 4, C14, C3, 0)
4207        || reg->value == CPENC (3, 4, C14, C3, 1)
4208        || reg->value == CPENC (3, 4, C14, C3, 2))
4209       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4210     return FALSE;
4211 
4212   /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
4213   if ((reg->value == CPEN_ (5, C0, 0)
4214        || reg->value == CPEN_ (5, C0, 1)
4215        || reg->value == CPENC (3, 5, C1, C0, 0)
4216        || reg->value == CPENC (3, 5, C1, C0, 2)
4217        || reg->value == CPENC (3, 5, C2, C0, 0)
4218        || reg->value == CPENC (3, 5, C2, C0, 1)
4219        || reg->value == CPENC (3, 5, C2, C0, 2)
4220        || reg->value == CPENC (3, 5, C5, C1, 0)
4221        || reg->value == CPENC (3, 5, C5, C1, 1)
4222        || reg->value == CPENC (3, 5, C5, C2, 0)
4223        || reg->value == CPENC (3, 5, C6, C0, 0)
4224        || reg->value == CPENC (3, 5, C10, C2, 0)
4225        || reg->value == CPENC (3, 5, C10, C3, 0)
4226        || reg->value == CPENC (3, 5, C12, C0, 0)
4227        || reg->value == CPENC (3, 5, C13, C0, 1)
4228        || reg->value == CPENC (3, 5, C14, C1, 0))
4229       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4230     return FALSE;
4231 
4232   /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
4233   if ((reg->value == CPENC (3, 5, C14, C2, 0)
4234        || reg->value == CPENC (3, 5, C14, C2, 1)
4235        || reg->value == CPENC (3, 5, C14, C2, 2)
4236        || reg->value == CPENC (3, 5, C14, C3, 0)
4237        || reg->value == CPENC (3, 5, C14, C3, 1)
4238        || reg->value == CPENC (3, 5, C14, C3, 2))
4239       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4240     return FALSE;
4241 
4242   /* ARMv8.2 features.  */
4243 
4244   /* ID_AA64MMFR2_EL1.  */
4245   if (reg->value == CPENC (3, 0, C0, C7, 2)
4246       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4247     return FALSE;
4248 
4249   /* PSTATE.UAO.  */
4250   if (reg->value == CPEN_ (0, C2, 4)
4251       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4252     return FALSE;
4253 
4254   /* RAS extension.  */
4255 
4256   /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
4257      ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
4258   if ((reg->value == CPENC (3, 0, C5, C3, 0)
4259        || reg->value == CPENC (3, 0, C5, C3, 1)
4260        || reg->value == CPENC (3, 0, C5, C3, 2)
4261        || reg->value == CPENC (3, 0, C5, C3, 3)
4262        || reg->value == CPENC (3, 0, C5, C4, 0)
4263        || reg->value == CPENC (3, 0, C5, C4, 1)
4264        || reg->value == CPENC (3, 0, C5, C4, 2)
4265        || reg->value == CPENC (3, 0, C5, C4, 3)
4266        || reg->value == CPENC (3, 0, C5, C5, 0)
4267        || reg->value == CPENC (3, 0, C5, C5, 1))
4268       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4269     return FALSE;
4270 
4271   /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
4272   if ((reg->value == CPENC (3, 4, C5, C2, 3)
4273        || reg->value == CPENC (3, 0, C12, C1, 1)
4274        || reg->value == CPENC (3, 4, C12, C1, 1))
4275       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4276     return FALSE;
4277 
4278   /* Statistical Profiling extension.  */
4279   if ((reg->value == CPENC (3, 0, C9, C10, 0)
4280        || reg->value == CPENC (3, 0, C9, C10, 1)
4281        || reg->value == CPENC (3, 0, C9, C10, 3)
4282        || reg->value == CPENC (3, 0, C9, C10, 7)
4283        || reg->value == CPENC (3, 0, C9, C9, 0)
4284        || reg->value == CPENC (3, 0, C9, C9, 2)
4285        || reg->value == CPENC (3, 0, C9, C9, 3)
4286        || reg->value == CPENC (3, 0, C9, C9, 4)
4287        || reg->value == CPENC (3, 0, C9, C9, 5)
4288        || reg->value == CPENC (3, 0, C9, C9, 6)
4289        || reg->value == CPENC (3, 0, C9, C9, 7)
4290        || reg->value == CPENC (3, 4, C9, C9, 0)
4291        || reg->value == CPENC (3, 5, C9, C9, 0))
4292       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4293     return FALSE;
4294 
4295   /* ARMv8.3 Pointer authentication keys.  */
4296   if ((reg->value == CPENC (3, 0, C2, C1, 0)
4297        || reg->value == CPENC (3, 0, C2, C1, 1)
4298        || reg->value == CPENC (3, 0, C2, C1, 2)
4299        || reg->value == CPENC (3, 0, C2, C1, 3)
4300        || reg->value == CPENC (3, 0, C2, C2, 0)
4301        || reg->value == CPENC (3, 0, C2, C2, 1)
4302        || reg->value == CPENC (3, 0, C2, C2, 2)
4303        || reg->value == CPENC (3, 0, C2, C2, 3)
4304        || reg->value == CPENC (3, 0, C2, C3, 0)
4305        || reg->value == CPENC (3, 0, C2, C3, 1))
4306       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4307     return FALSE;
4308 
4309   /* SVE.  */
4310   if ((reg->value == CPENC (3, 0, C0, C4, 4)
4311        || reg->value == CPENC (3, 0, C1, C2, 0)
4312        || reg->value == CPENC (3, 4, C1, C2, 0)
4313        || reg->value == CPENC (3, 6, C1, C2, 0)
4314        || reg->value == CPENC (3, 5, C1, C2, 0)
4315        || reg->value == CPENC (3, 0, C0, C0, 7))
4316       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4317     return FALSE;
4318 
4319   /* ARMv8.4 features.  */
4320 
4321   /* PSTATE.DIT.  */
4322   if (reg->value == CPEN_ (3, C2, 5)
4323       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4324     return FALSE;
4325 
4326   /* Virtualization extensions.  */
4327   if ((reg->value == CPENC(3, 4, C2, C6, 2)
4328        || reg->value == CPENC(3, 4, C2, C6, 0)
4329        || reg->value == CPENC(3, 4, C14, C4, 0)
4330        || reg->value == CPENC(3, 4, C14, C4, 2)
4331        || reg->value == CPENC(3, 4, C14, C4, 1)
4332        || reg->value == CPENC(3, 4, C14, C5, 0)
4333        || reg->value == CPENC(3, 4, C14, C5, 2)
4334        || reg->value == CPENC(3, 4, C14, C5, 1)
4335        || reg->value == CPENC(3, 4, C1, C3, 1)
4336        || reg->value == CPENC(3, 4, C2, C2, 0))
4337       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4338     return FALSE;
4339 
4340   /* ARMv8.4 TLB instructions.  */
4341   if ((reg->value == CPENS (0, C8, C1, 0)
4342        || reg->value == CPENS (0, C8, C1, 1)
4343        || reg->value == CPENS (0, C8, C1, 2)
4344        || reg->value == CPENS (0, C8, C1, 3)
4345        || reg->value == CPENS (0, C8, C1, 5)
4346        || reg->value == CPENS (0, C8, C1, 7)
4347        || reg->value == CPENS (4, C8, C4, 0)
4348        || reg->value == CPENS (4, C8, C4, 4)
4349        || reg->value == CPENS (4, C8, C1, 1)
4350        || reg->value == CPENS (4, C8, C1, 5)
4351        || reg->value == CPENS (4, C8, C1, 6)
4352        || reg->value == CPENS (6, C8, C1, 1)
4353        || reg->value == CPENS (6, C8, C1, 5)
4354        || reg->value == CPENS (4, C8, C1, 0)
4355        || reg->value == CPENS (4, C8, C1, 4)
4356        || reg->value == CPENS (6, C8, C1, 0)
4357        || reg->value == CPENS (0, C8, C6, 1)
4358        || reg->value == CPENS (0, C8, C6, 3)
4359        || reg->value == CPENS (0, C8, C6, 5)
4360        || reg->value == CPENS (0, C8, C6, 7)
4361        || reg->value == CPENS (0, C8, C2, 1)
4362        || reg->value == CPENS (0, C8, C2, 3)
4363        || reg->value == CPENS (0, C8, C2, 5)
4364        || reg->value == CPENS (0, C8, C2, 7)
4365        || reg->value == CPENS (0, C8, C5, 1)
4366        || reg->value == CPENS (0, C8, C5, 3)
4367        || reg->value == CPENS (0, C8, C5, 5)
4368        || reg->value == CPENS (0, C8, C5, 7)
4369        || reg->value == CPENS (4, C8, C0, 2)
4370        || reg->value == CPENS (4, C8, C0, 6)
4371        || reg->value == CPENS (4, C8, C4, 2)
4372        || reg->value == CPENS (4, C8, C4, 6)
4373        || reg->value == CPENS (4, C8, C4, 3)
4374        || reg->value == CPENS (4, C8, C4, 7)
4375        || reg->value == CPENS (4, C8, C6, 1)
4376        || reg->value == CPENS (4, C8, C6, 5)
4377        || reg->value == CPENS (4, C8, C2, 1)
4378        || reg->value == CPENS (4, C8, C2, 5)
4379        || reg->value == CPENS (4, C8, C5, 1)
4380        || reg->value == CPENS (4, C8, C5, 5)
4381        || reg->value == CPENS (6, C8, C6, 1)
4382        || reg->value == CPENS (6, C8, C6, 5)
4383        || reg->value == CPENS (6, C8, C2, 1)
4384        || reg->value == CPENS (6, C8, C2, 5)
4385        || reg->value == CPENS (6, C8, C5, 1)
4386        || reg->value == CPENS (6, C8, C5, 5))
4387       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4388     return FALSE;
4389 
4390   /* Random Number Instructions.  For now they are available
4391      (and optional) only with ARMv8.5-A.  */
4392   if ((reg->value == CPENC (3, 3, C2, C4, 0)
4393        || reg->value == CPENC (3, 3, C2, C4, 1))
4394       && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4395 	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4396     return FALSE;
4397 
4398   /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
4399   if ((reg->value == CPENC (3, 3, C4, C2, 7)
4400        || reg->value == CPENC (3, 0, C6, C6, 1)
4401        || reg->value == CPENC (3, 0, C6, C5, 0)
4402        || reg->value == CPENC (3, 4, C6, C5, 0)
4403        || reg->value == CPENC (3, 6, C6, C6, 0)
4404        || reg->value == CPENC (3, 5, C6, C6, 0)
4405        || reg->value == CPENC (3, 0, C1, C0, 5)
4406        || reg->value == CPENC (3, 0, C1, C0, 6))
4407       && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4408     return FALSE;
4409 
4410   return TRUE;
4411 }
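
/* Note that the two predicates above are independent: a register can be
   supported for the selected feature set and still be deprecated (e.g.
   spsr_svc, which carries F_DEPRECATED but not F_ARCHEXT), so a caller such
   as the assembler would typically use aarch64_sys_reg_supported_p to reject
   the operand outright and aarch64_sys_reg_deprecated_p separately to emit a
   warning.  */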
4412 
4413 /* The CPENC below is fairly misleading: the fields here are not in CPENC
4414    form, they are in op2op1 form.  The fields are encoded by ins_pstatefield,
4415    which just shifts the value by the width of the fields in a loop.  So if
4416    you CPENC them, only the first value will be set and the rest are masked
4417    out to 0.  As an example, for op2 = 3 and op1 = 2, CPENC would produce a
4418    value of 0b110000000001000000 (0x30040) while what you want is
4419    0b011010 (0x1a).  */
4420 const aarch64_sys_reg aarch64_pstatefields [] =
4421 {
4422   { "spsel",            0x05,	0 },
4423   { "daifset",          0x1e,	0 },
4424   { "daifclr",          0x1f,	0 },
4425   { "pan",		0x04,	F_ARCHEXT },
4426   { "uao",		0x03,	F_ARCHEXT },
4427   { "ssbs",		0x19,   F_ARCHEXT },
4428   { "dit",		0x1a,	F_ARCHEXT },
4429   { "tco",		0x1c,	F_ARCHEXT },
4430   { 0,          CPENC(0,0,0,0,0), 0 },
4431 };
4432 
4433 bfd_boolean
4434 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4435 				 const aarch64_sys_reg *reg)
4436 {
4437   if (!(reg->flags & F_ARCHEXT))
4438     return TRUE;
4439 
4440   /* PAN.  Values are from aarch64_pstatefields.  */
4441   if (reg->value == 0x04
4442       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4443     return FALSE;
4444 
4445   /* UAO.  Values are from aarch64_pstatefields.  */
4446   if (reg->value == 0x03
4447       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4448     return FALSE;
4449 
4450   /* SSBS.  Values are from aarch64_pstatefields.  */
4451   if (reg->value == 0x19
4452       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4453     return FALSE;
4454 
4455   /* DIT.  Values are from aarch64_pstatefields.  */
4456   if (reg->value == 0x1a
4457       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4458     return FALSE;
4459 
4460   /* TCO.  Values are from aarch64_pstatefields.  */
4461   if (reg->value == 0x1c
4462       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4463     return FALSE;
4464 
4465   return TRUE;
4466 }
4467 
4468 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4469 {
4470     { "ialluis", CPENS(0,C7,C1,0), 0 },
4471     { "iallu",   CPENS(0,C7,C5,0), 0 },
4472     { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
4473     { 0, CPENS(0,0,0,0), 0 }
4474 };
4475 
4476 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4477 {
4478     { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
4479     { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
4480     { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
4481     { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
4482     { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
4483     { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
4484     { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
4485     { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
4486     { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
4487     { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
4488     { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4489     { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4490     { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
4491     { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4492     { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4493     { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
4494     { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4495     { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4496     { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4497     { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4498     { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4499     { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4500     { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
4501     { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4502     { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4503     { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
4504     { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4505     { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4506     { 0,       CPENS(0,0,0,0), 0 }
4507 };
4508 
4509 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4510 {
4511     { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
4512     { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
4513     { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
4514     { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
4515     { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
4516     { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
4517     { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
4518     { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
4519     { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
4520     { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
4521     { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
4522     { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
4523     { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4524     { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4525     { 0,       CPENS(0,0,0,0), 0 }
4526 };
4527 
4528 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4529 {
4530     { "vmalle1",   CPENS(0,C8,C7,0), 0 },
4531     { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
4532     { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
4533     { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
4534     { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4535     { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
4536     { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
4537     { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
4538     { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4539     { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4540     { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
4541     { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
4542     { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
4543     { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
4544     { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4545     { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4546     { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
4547     { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
4548     { "alle2",     CPENS(4,C8,C7,0), 0 },
4549     { "alle2is",   CPENS(4,C8,C3,0), 0 },
4550     { "alle1",     CPENS(4,C8,C7,4), 0 },
4551     { "alle1is",   CPENS(4,C8,C3,4), 0 },
4552     { "alle3",     CPENS(6,C8,C7,0), 0 },
4553     { "alle3is",   CPENS(6,C8,C3,0), 0 },
4554     { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
4555     { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
4556     { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
4557     { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
4558     { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
4559     { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
4560     { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
4561     { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
4562 
4563     { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
4564     { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4565     { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4566     { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4567     { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4568     { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4569     { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4570     { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4571     { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4572     { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4573     { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4574     { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4575     { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4576     { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
4577     { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
4578     { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },
4579 
4580     { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4581     { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4582     { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4583     { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4584     { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4585     { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4586     { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4587     { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4588     { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4589     { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4590     { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4591     { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4592     { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4593     { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4594     { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4595     { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4596     { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4597     { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4598     { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4599     { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4600     { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4601     { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4602     { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4603     { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4604     { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4605     { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4606     { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4607     { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4608     { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4609     { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4610 
4611     { 0,       CPENS(0,0,0,0), 0 }
4612 };
4613 
4614 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
4615 {
4616     /* RCTX is somewhat unusual in that its value (op2) depends on the
4617        instruction in which it is used (cfp/dvp/cpp).  Op2 is therefore
4618        masked out here and instead encoded directly in the
4619        aarch64_opcode_table entries for the respective instructions.  */
4620     { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
4621 
4622     { 0,       CPENS(0,0,0,0), 0 }
4623 };
4624 
4625 bfd_boolean
4626 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4627 {
4628   return (sys_ins_reg->flags & F_HASXT) != 0;
4629 }
4630 
4631 extern bfd_boolean
4632 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4633 				 const aarch64_sys_ins_reg *reg)
4634 {
4635   if (!(reg->flags & F_ARCHEXT))
4636     return TRUE;
4637 
4638   /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
4639   if (reg->value == CPENS (3, C7, C12, 1)
4640       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4641     return FALSE;
4642 
4643   /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
4644   if (reg->value == CPENS (3, C7, C13, 1)
4645       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4646     return FALSE;
4647 
4648   /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
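  /* These are the encodings of the MTE entries in aarch64_sys_regs_dc above:
     igvac, igsw, cgsw, cigsw, cgvac, cgvap, cgvadp, cigvac, gva, igdvac,
     igdsw, cgdsw, cigdsw, cgdvac, cgdvap, cgdvadp, cigdvac and gzva.  */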
4649   if ((reg->value == CPENS (0, C7, C6, 3)
4650        || reg->value == CPENS (0, C7, C6, 4)
4651        || reg->value == CPENS (0, C7, C10, 4)
4652        || reg->value == CPENS (0, C7, C14, 4)
4653        || reg->value == CPENS (3, C7, C10, 3)
4654        || reg->value == CPENS (3, C7, C12, 3)
4655        || reg->value == CPENS (3, C7, C13, 3)
4656        || reg->value == CPENS (3, C7, C14, 3)
4657        || reg->value == CPENS (3, C7, C4, 3)
4658        || reg->value == CPENS (0, C7, C6, 5)
4659        || reg->value == CPENS (0, C7, C6, 6)
4660        || reg->value == CPENS (0, C7, C10, 6)
4661        || reg->value == CPENS (0, C7, C14, 6)
4662        || reg->value == CPENS (3, C7, C10, 5)
4663        || reg->value == CPENS (3, C7, C12, 5)
4664        || reg->value == CPENS (3, C7, C13, 5)
4665        || reg->value == CPENS (3, C7, C14, 5)
4666        || reg->value == CPENS (3, C7, C4, 4))
4667       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4668     return FALSE;
4669 
4670   /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
4671   if ((reg->value == CPENS (0, C7, C9, 0)
4672        || reg->value == CPENS (0, C7, C9, 1))
4673       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4674     return FALSE;
4675 
4676   /* CFP/DVP/CPP RCTX.  Values are from aarch64_sys_regs_sr.  */
4677   if (reg->value == CPENS (3, C7, C3, 0)
4678       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4679     return FALSE;
4680 
4681   return TRUE;
4682 }
4683 
4684 #undef C0
4685 #undef C1
4686 #undef C2
4687 #undef C3
4688 #undef C4
4689 #undef C5
4690 #undef C6
4691 #undef C7
4692 #undef C8
4693 #undef C9
4694 #undef C10
4695 #undef C11
4696 #undef C12
4697 #undef C13
4698 #undef C14
4699 #undef C15
4700 
4701 #define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
4702 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4703 
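/* Verifier for LDPSW.  Flag as undefined (ERR_UND) writeback forms in which
   the base register Rn overlaps one of the transfer registers (unless Rn is
   register 31, i.e. SP), and loads in which the two transfer registers are
   equal, e.g. the encoding of "ldpsw x0, x0, [x1]".  */
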
4704 static enum err_type
4705 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4706 	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4707 	      bfd_boolean encoding ATTRIBUTE_UNUSED,
4708 	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4709 	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4710 {
4711   int t  = BITS (insn, 4, 0);
4712   int n  = BITS (insn, 9, 5);
4713   int t2 = BITS (insn, 14, 10);
4714 
4715   if (BIT (insn, 23))
4716     {
4717       /* Write back enabled.  */
4718       if ((t == n || t2 == n) && n != 31)
4719 	return ERR_UND;
4720     }
4721 
4722   if (BIT (insn, 22))
4723     {
4724 	      /* Load.  */
4725       if (t == t2)
4726 	return ERR_UND;
4727     }
4728 
4729   return ERR_OK;
4730 }
4731 
4732 /* Verifier for vector-by-element instructions with 3 operands, for which
4733    the condition `if sz:L == 11 then UNDEFINED` holds.  */
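/* (Background, for the S/D by-element forms handled here: "sz" selects
   single vs. double precision and "H:L" encode the element index; with
   sz == 1 only H is a valid index bit, so sz:L == 11 selects no lane and
   the pattern is reserved.)  */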
4734 
4735 static enum err_type
4736 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4737 		bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4738 		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4739 		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4740 {
4741   const aarch64_insn undef_pattern = 0x3;
4742   aarch64_insn value;
4743 
4744   assert (inst->opcode);
4745   assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4746   value = encoding ? inst->value : insn;
4747   assert (value);
4748 
4749   if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4750     return ERR_UND;
4751 
4752   return ERR_OK;
4753 }
4754 
4755 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction INST.
4756    If INST is NULL the given INSN_SEQUENCE is simply cleared, leaving it
4757    without an opening instruction.  */
4758 
4759 void
4760 init_insn_sequence (const struct aarch64_inst *inst,
4761 		    aarch64_instr_sequence *insn_sequence)
4762 {
4763   int num_req_entries = 0;
4764   insn_sequence->next_insn = 0;
4765   insn_sequence->num_insns = num_req_entries;
4766   if (insn_sequence->instr)
4767     XDELETE (insn_sequence->instr);
4768   insn_sequence->instr = NULL;
4769 
4770   if (inst)
4771     {
4772       insn_sequence->instr = XNEW (aarch64_inst);
4773       memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4774     }
4775 
4776   /* Determine how many entries the new sequence requires.  If the number of
4777      cases outgrows a simple if/else chain, a lookup table may be a better
4778      fit.  */
4779   if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4780     num_req_entries = 1;
4781 
4782   if (insn_sequence->current_insns)
4783     XDELETEVEC (insn_sequence->current_insns);
4784   insn_sequence->current_insns = NULL;
4785 
4786   if (num_req_entries != 0)
4787     {
4788       size_t size = num_req_entries * sizeof (aarch64_inst);
4789       insn_sequence->current_insns
4790 	= (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4791       memset (insn_sequence->current_insns, 0, size);
4792     }
4793 }
4794 
4795 
4796 /*  This function verifies that the instruction INST adheres to its specified
4797     constraints.  If it does, ERR_OK is returned; if not, ERR_VFI is
4798     returned and MISMATCH_DETAIL contains the reason why verification failed.
4799 
4800     The function is called both during assembly and disassembly.  If assembling,
4801     ENCODING will be TRUE, otherwise FALSE.  When disassembling, PC will be set
4802     to the address of the current instruction relative to the section.
4803 
4804     When decoding (ENCODING is FALSE), PC=0 indicates the start of a section.  The
4805     constraints are verified against the given state INSN_SEQUENCE, updated as it
4806     transitions through the verification.  */
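
/* By way of illustration (the full rules are those of the SVE `movprfx'
   specification, not restated here), a sequence such as

       movprfx z0.s, p0/m, z1.s
       fmla    z0.s, p0/m, z2.s, z3.s

   satisfies the checks below, whereas following the `movprfx' with an
   instruction whose destination is not z0 is flagged with "output register
   of preceding `movprfx' not used in current instruction".  */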
4807 
4808 enum err_type
4809 verify_constraints (const struct aarch64_inst *inst,
4810 		    const aarch64_insn insn ATTRIBUTE_UNUSED,
4811 		    bfd_vma pc,
4812 		    bfd_boolean encoding,
4813 		    aarch64_operand_error *mismatch_detail,
4814 		    aarch64_instr_sequence *insn_sequence)
4815 {
4816   assert (inst);
4817   assert (inst->opcode);
4818 
4819   const struct aarch64_opcode *opcode = inst->opcode;
4820   if (!opcode->constraints && !insn_sequence->instr)
4821     return ERR_OK;
4822 
4823   assert (insn_sequence);
4824 
4825   enum err_type res = ERR_OK;
4826 
4827   /* This instruction puts a constraint on the insn_sequence.  */
4828   if (opcode->flags & F_SCAN)
4829     {
4830       if (insn_sequence->instr)
4831 	{
4832 	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4833 	  mismatch_detail->error = _("instruction opens new dependency "
4834 				     "sequence without ending previous one");
4835 	  mismatch_detail->index = -1;
4836 	  mismatch_detail->non_fatal = TRUE;
4837 	  res = ERR_VFI;
4838 	}
4839 
4840       init_insn_sequence (inst, insn_sequence);
4841       return res;
4842     }
4843 
4844   /* Verify constraints on an existing sequence.  */
4845   if (insn_sequence->instr)
4846     {
4847       const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4848       /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4849 	 closed a previous one that we should have.  */
4850       if (!encoding && pc == 0)
4851 	{
4852 	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4853 	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
4854 	  mismatch_detail->index = -1;
4855 	  mismatch_detail->non_fatal = TRUE;
4856 	  res = ERR_VFI;
4857 	  /* Reset the sequence.  */
4858 	  init_insn_sequence (NULL, insn_sequence);
4859 	  return res;
4860 	}
4861 
4862       /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
4863       if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4864 	{
4865 	  /* First check that the MOVPRFX instruction is followed by an SVE
4866 	     instruction at all, so that we can give a clearer error message.  */
4867 	  if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
4868 	    {
4869 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4870 	      mismatch_detail->error = _("SVE instruction expected after "
4871 					 "`movprfx'");
4872 	      mismatch_detail->index = -1;
4873 	      mismatch_detail->non_fatal = TRUE;
4874 	      res = ERR_VFI;
4875 	      goto done;
4876 	    }
4877 
4878 	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4879 	     instruction that is allowed to be used with a MOVPRFX.  */
4880 	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
4881 	    {
4882 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4883 	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4884 					 "expected");
4885 	      mismatch_detail->index = -1;
4886 	      mismatch_detail->non_fatal = TRUE;
4887 	      res = ERR_VFI;
4888 	      goto done;
4889 	    }
4890 
4891 	  /* Next check for usage of the predicate register.  */
4892 	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4893 	  aarch64_opnd_info blk_pred, inst_pred;
4894 	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4895 	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4896 	  bfd_boolean predicated = FALSE;
4897 	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4898 
4899 	  /* Determine if the movprfx instruction used is predicated or not.  */
4900 	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4901 	    {
4902 	      predicated = TRUE;
4903 	      blk_pred = insn_sequence->instr->operands[1];
4904 	    }
4905 
4906 	  unsigned char max_elem_size = 0;
4907 	  unsigned char current_elem_size;
4908 	  int num_op_used = 0, last_op_usage = 0;
4909 	  int i, inst_pred_idx = -1;
4910 	  int num_ops = aarch64_num_of_operands (opcode);
4911 	  for (i = 0; i < num_ops; i++)
4912 	    {
4913 	      aarch64_opnd_info inst_op = inst->operands[i];
4914 	      switch (inst_op.type)
4915 		{
4916 		  case AARCH64_OPND_SVE_Zd:
4917 		  case AARCH64_OPND_SVE_Zm_5:
4918 		  case AARCH64_OPND_SVE_Zm_16:
4919 		  case AARCH64_OPND_SVE_Zn:
4920 		  case AARCH64_OPND_SVE_Zt:
4921 		  case AARCH64_OPND_SVE_Vm:
4922 		  case AARCH64_OPND_SVE_Vn:
4923 		  case AARCH64_OPND_Va:
4924 		  case AARCH64_OPND_Vn:
4925 		  case AARCH64_OPND_Vm:
4926 		  case AARCH64_OPND_Sn:
4927 		  case AARCH64_OPND_Sm:
4928 		  case AARCH64_OPND_Rn:
4929 		  case AARCH64_OPND_Rm:
4930 		  case AARCH64_OPND_Rn_SP:
4931 		  case AARCH64_OPND_Rm_SP:
4932 		    if (inst_op.reg.regno == blk_dest.reg.regno)
4933 		      {
4934 			num_op_used++;
4935 			last_op_usage = i;
4936 		      }
4937 		    current_elem_size
4938 		      = aarch64_get_qualifier_esize (inst_op.qualifier);
4939 		    if (current_elem_size > max_elem_size)
4940 		      max_elem_size = current_elem_size;
4941 		    break;
4942 		  case AARCH64_OPND_SVE_Pd:
4943 		  case AARCH64_OPND_SVE_Pg3:
4944 		  case AARCH64_OPND_SVE_Pg4_5:
4945 		  case AARCH64_OPND_SVE_Pg4_10:
4946 		  case AARCH64_OPND_SVE_Pg4_16:
4947 		  case AARCH64_OPND_SVE_Pm:
4948 		  case AARCH64_OPND_SVE_Pn:
4949 		  case AARCH64_OPND_SVE_Pt:
4950 		    inst_pred = inst_op;
4951 		    inst_pred_idx = i;
4952 		    break;
4953 		  default:
4954 		    break;
4955 		}
4956 	    }
4957 
4958 	   assert (max_elem_size != 0);
4959 	   aarch64_opnd_info inst_dest = inst->operands[0];
4960 	   /* Determine the size that should be used to compare against the
4961 	      movprfx size.  */
4962 	   current_elem_size
4963 	     = opcode->constraints & C_MAX_ELEM
4964 	       ? max_elem_size
4965 	       : aarch64_get_qualifier_esize (inst_dest.qualifier);
4966 
4967 	  /* If movprfx is predicated do some extra checks.  */
4968 	  if (predicated)
4969 	    {
4970 	      /* The instruction must be predicated.  */
4971 	      if (inst_pred_idx < 0)
4972 		{
4973 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4974 		  mismatch_detail->error = _("predicated instruction expected "
4975 					     "after `movprfx'");
4976 		  mismatch_detail->index = -1;
4977 		  mismatch_detail->non_fatal = TRUE;
4978 		  res = ERR_VFI;
4979 		  goto done;
4980 		}
4981 
4982 	      /* The instruction must have a merging predicate.  */
4983 	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4984 		{
4985 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4986 		  mismatch_detail->error = _("merging predicate expected due "
4987 					     "to preceding `movprfx'");
4988 		  mismatch_detail->index = inst_pred_idx;
4989 		  mismatch_detail->non_fatal = TRUE;
4990 		  res = ERR_VFI;
4991 		  goto done;
4992 		}
4993 
4994 	      /* The same predicate register must be used in the instruction.  */
4995 	      if (blk_pred.reg.regno != inst_pred.reg.regno)
4996 		{
4997 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4998 		  mismatch_detail->error = _("predicate register differs "
4999 					     "from that in preceding "
5000 					     "`movprfx'");
5001 		  mismatch_detail->index = inst_pred_idx;
5002 		  mismatch_detail->non_fatal = TRUE;
5003 		  res = ERR_VFI;
5004 		  goto done;
5005 		}
5006 	    }
5007 
5008 	  /* Destructive operations also read their destination register, so by
5009 	     definition they are allowed one extra use of that register.  */
5010 	  int allowed_usage
5011 	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5012 
5013 	  /* Operand is not used at all.  */
5014 	  if (num_op_used == 0)
5015 	    {
5016 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5017 	      mismatch_detail->error = _("output register of preceding "
5018 					 "`movprfx' not used in current "
5019 					 "instruction");
5020 	      mismatch_detail->index = 0;
5021 	      mismatch_detail->non_fatal = TRUE;
5022 	      res = ERR_VFI;
5023 	      goto done;
5024 	    }
5025 
5026 	  /* We now know it is used; determine exactly where it is used.  */
5027 	  if (blk_dest.reg.regno != inst_dest.reg.regno)
5028 	    {
5029 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5030 	      mismatch_detail->error = _("output register of preceding "
5031 					 "`movprfx' expected as output");
5032 	      mismatch_detail->index = 0;
5033 	      mismatch_detail->non_fatal = TRUE;
5034 	      res = ERR_VFI;
5035 	      goto done;
5036 	    }
5037 
5038 	  /* Operand used more than allowed for the specific opcode type.  */
5039 	  if (num_op_used > allowed_usage)
5040 	    {
5041 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5042 	      mismatch_detail->error = _("output register of preceding "
5043 					 "`movprfx' used as input");
5044 	      mismatch_detail->index = last_op_usage;
5045 	      mismatch_detail->non_fatal = TRUE;
5046 	      res = ERR_VFI;
5047 	      goto done;
5048 	    }
5049 
5050 	  /* Now the only thing left is the qualifier checks.  The register
5051 	     must have the same maximum element size.  */
5052 	  if (inst_dest.qualifier
5053 	      && blk_dest.qualifier
5054 	      && current_elem_size
5055 		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5056 	    {
5057 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5058 	      mismatch_detail->error = _("register size not compatible with "
5059 					 "previous `movprfx'");
5060 	      mismatch_detail->index = 0;
5061 	      mismatch_detail->non_fatal = TRUE;
5062 	      res = ERR_VFI;
5063 	      goto done;
5064 	    }
5065 	}
5066 
5067 done:
5068       /* Add the new instruction to the sequence.  */
5069       memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5070 	      inst, sizeof (aarch64_inst));
5071 
5072       /* Check if sequence is now full.  */
5073       if (insn_sequence->next_insn >= insn_sequence->num_insns)
5074 	{
5075 	  /* Sequence is full, but we don't have anything special to do for now,
5076 	     so clear and reset it.  */
5077 	  init_insn_sequence (NULL, insn_sequence);
5078 	}
5079     }
5080 
5081   return res;
5082 }
5083 
5084 
5085 /* Return true if VALUE cannot be moved into an SVE register using DUP
5086    (with any element size, not just ESIZE) and if using DUPM would
5087    therefore be OK.  ESIZE is the number of bytes in the immediate.  */
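
/* For example, with 16-bit elements (ESIZE == 2): 0x3f00 can be created by
   DUP as #0x3f shifted left by 8, so this function returns FALSE, whereas
   0x0ff0 is not an 8-bit signed immediate even with the optional shift by 8,
   so the function returns TRUE and the DUPM form is preferred.  */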
5088 
5089 bfd_boolean
5090 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5091 {
5092   int64_t svalue = uvalue;
5093   uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5094 
5095   if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5096     return FALSE;
5097   if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5098     {
5099       svalue = (int32_t) uvalue;
5100       if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5101 	{
5102 	  svalue = (int16_t) uvalue;
5103 	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5104 	    return FALSE;
5105 	}
5106     }
5107   if ((svalue & 0xff) == 0)
5108     svalue /= 256;
5109   return svalue < -128 || svalue >= 128;
5110 }
5111 
5112 /* Include the opcode description table as well as the operand description
5113    table.  */
5114 #define VERIFIER(x) verify_##x
5115 #include "aarch64-tbl.h"
5116