xref: /netbsd-src/external/gpl3/gdb/dist/opcodes/aarch64-opc.c (revision a04395531661c5e8d314125d5ae77d4cbedd5d73)
1 /* aarch64-opc.c -- AArch64 opcode support.
2    Copyright (C) 2009-2020 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28 
29 #include "opintl.h"
30 #include "libiberty.h"
31 
32 #include "aarch64-opc.h"
33 
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37 
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  The
   array is indexed directly by the 5-bit immediate encoding (0-31).  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78 
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  The
   array is indexed directly by the 4-bit immediate encoding (0-15).  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101 
102 /* Helper functions to determine which operand to be used to encode/decode
103    the size:Q fields for AdvSIMD instructions.  */
104 
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108   return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 	  : FALSE);
111 }
112 
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116   return ((qualifier >= AARCH64_OPND_QLF_S_B
117 	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 	  : FALSE);
119 }
120 
/* Classification of the vector-arrangement pattern formed by an
   instruction's operand qualifier sequence; used below to pick which
   operand carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* For each data_pattern value above, the index of the operand whose
   qualifier determines the size:Q encoding.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138 
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140    the data pattern.
141    N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142    corresponds to one of a sequence of operands.  */
143 
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147   if (vector_qualifier_p (qualifiers[0]) == TRUE)
148     {
149       /* e.g. v.4s, v.4s, v.4s
150 	   or v.4h, v.4h, v.h[3].  */
151       if (qualifiers[0] == qualifiers[1]
152 	  && vector_qualifier_p (qualifiers[2]) == TRUE
153 	  && (aarch64_get_qualifier_esize (qualifiers[0])
154 	      == aarch64_get_qualifier_esize (qualifiers[1]))
155 	  && (aarch64_get_qualifier_esize (qualifiers[0])
156 	      == aarch64_get_qualifier_esize (qualifiers[2])))
157 	return DP_VECTOR_3SAME;
158       /* e.g. v.8h, v.8b, v.8b.
159            or v.4s, v.4h, v.h[2].
160 	   or v.8h, v.16b.  */
161       if (vector_qualifier_p (qualifiers[1]) == TRUE
162 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 	  && (aarch64_get_qualifier_esize (qualifiers[0])
164 	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 	return DP_VECTOR_LONG;
166       /* e.g. v.8h, v.8h, v.8b.  */
167       if (qualifiers[0] == qualifiers[1]
168 	  && vector_qualifier_p (qualifiers[2]) == TRUE
169 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 	  && (aarch64_get_qualifier_esize (qualifiers[0])
171 	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 	  && (aarch64_get_qualifier_esize (qualifiers[0])
173 	      == aarch64_get_qualifier_esize (qualifiers[1])))
174 	return DP_VECTOR_WIDE;
175     }
176   else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177     {
178       /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
179       if (vector_qualifier_p (qualifiers[1]) == TRUE
180 	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 	return DP_VECTOR_ACROSS_LANES;
182     }
183 
184   return DP_UNKNOWN;
185 }
186 
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  Returns the operand index derived from the
   data pattern of OPCODE's first qualifier sequence.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data,
   however, it is not obvious that the optimization will bring significant
   benefit.  */

int
aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
{
  return
    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
201 
/* Table describing every instruction field used for encoding/decoding;
   each entry is { lsb, width } in bits within the 32-bit instruction
   word.  The entries are indexed by a field-kind enumeration declared
   elsewhere (presumably FLD_* in aarch64-opc.h) — the order here must
   stay in sync with that enumeration; verify before reordering.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 16 },	/* imm16_2: in udf instruction. */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
    { 16,  4 }, /* SVE_tsz: triangular size select.  */
    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22,  1 }, /* sz: 1-bit element size select.  */
};
332 
/* Return the operand class of operand TYPE, as recorded in the
   aarch64_operands description table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
338 
/* Return the name string of operand TYPE from the aarch64_operands
   description table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
344 
/* Get operand description string.
   This is usually for the diagnosis purpose.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
352 
/* Table of all conditional affixes.  Entry N holds the spellings for
   condition-code value N; the first name in each list is the canonical
   one, later names are accepted aliases (e.g. "hs" for "cs").  Note the
   values are arranged in inverse pairs differing only in the low bit
   (eq/ne, cs/cc, ...), which get_inverted_cond below relies on.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
373 
/* Return the condition descriptor for the 4-bit condition code VALUE.
   VALUE must be < 16.  */
const aarch64_cond *
get_cond_from_value (aarch64_insn value)
{
  assert (value < 16);
  return &aarch64_conds[(unsigned int) value];
}
380 
/* Return the logical inverse of COND.  Flipping the low bit of the
   value works because the aarch64_conds table arranges each condition
   next to its inverse (eq/ne, cs/cc, ..., al/nv).  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
386 
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind, so the entry order here must match that
   enumeration (aarch64_get_operand_modifier relies on it by computing
   the kind from the entry's position).

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
412 
/* Return the modifier kind corresponding to DESC.  DESC must point at
   an entry of aarch64_operand_modifiers; the kind is recovered from the
   entry's index via pointer subtraction.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
418 
/* Return the common encoding value associated with modifier KIND, as
   recorded in the aarch64_operand_modifiers table.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
424 
425 enum aarch64_modifier_kind
426 aarch64_get_operand_modifier_from_value (aarch64_insn value,
427 					 bfd_boolean extend_p)
428 {
429   if (extend_p == TRUE)
430     return AARCH64_MOD_UXTB + value;
431   else
432     return AARCH64_MOD_LSL - value;
433 }
434 
435 bfd_boolean
436 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
437 {
438   return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
439     ? TRUE : FALSE;
440 }
441 
442 static inline bfd_boolean
443 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
444 {
445   return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
446     ? TRUE : FALSE;
447 }
448 
/* Barrier option names, indexed by their 4-bit encoding (0x0-0xf).
   Values with no architectural name are rendered as plain immediates
   ("#0x00" etc.).  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
468 
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias (possibly combined with flags via
   HINT_ENCODE).  The list of operands is terminated by NULL in the name
   column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
486 
/* Prefetch operation names, indexed by their 5-bit encoding; a NULL
   name marks an encoding with no mnemonic.  The B macro packs the
   three sub-fields into that 5-bit value:
   op -> op:       load = 0 instruction = 1 store = 2
   l  -> level:    1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
527 
528 /* Utilities on value constraint.  */
529 
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
535 
/* Return true (1) if VALUE is a multiple of ALIGN, 0 otherwise.
   ALIGN must be non-zero.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  if (value % align)
    return 0;
  return 1;
}
542 
/* Return 1 iff the signed VALUE fits in a two's-complement field of
   WIDTH bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  WIDTH must be
   below 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (uint64_t) 1 << (width - 1);
    return (value >= -limit && value < limit) ? 1 : 0;
  }
}
556 
/* Return 1 iff VALUE fits in an unsigned field of WIDTH bits, i.e.
   0 <= VALUE < 2^WIDTH.  WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (uint64_t) 1 << width;
    return (value >= 0 && value < limit) ? 1 : 0;
  }
}
570 
/* Return 1 if OPERAND is SP or WSP: an integer-register operand whose
   description allows the stack pointer and whose register number is 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
580 
/* Return 1 if OPERAND is XZR or WZR: an integer-register operand whose
   description does NOT allow the stack pointer and whose register number
   is 31 (register 31 means SP in SP-capable positions, ZR otherwise).  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
590 
591 /* Return true if the operand *OPERAND that has the operand code
592    OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
593    qualified by the qualifier TARGET.  */
594 
595 static inline int
596 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
597 			  aarch64_opnd_qualifier_t target)
598 {
599   switch (operand->qualifier)
600     {
601     case AARCH64_OPND_QLF_W:
602       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
603 	return 1;
604       break;
605     case AARCH64_OPND_QLF_X:
606       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
607 	return 1;
608       break;
609     case AARCH64_OPND_QLF_WSP:
610       if (target == AARCH64_OPND_QLF_W
611 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
612 	return 1;
613       break;
614     case AARCH64_OPND_QLF_SP:
615       if (target == AARCH64_OPND_QLF_X
616 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
617 	return 1;
618       break;
619     default:
620       break;
621     }
622 
623   return 0;
624 }
625 
626 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
627    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
628 
629    Return NIL if more than one expected qualifiers are found.  */
630 
631 aarch64_opnd_qualifier_t
632 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
633 				int idx,
634 				const aarch64_opnd_qualifier_t known_qlf,
635 				int known_idx)
636 {
637   int i, saved_i;
638 
639   /* Special case.
640 
641      When the known qualifier is NIL, we have to assume that there is only
642      one qualifier sequence in the *QSEQ_LIST and return the corresponding
643      qualifier directly.  One scenario is that for instruction
644 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
645      which has only one possible valid qualifier sequence
646 	NIL, S_D
647      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
648      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
649 
650      Because the qualifier NIL has dual roles in the qualifier sequence:
651      it can mean no qualifier for the operand, or the qualifer sequence is
652      not in use (when all qualifiers in the sequence are NILs), we have to
653      handle this special case here.  */
654   if (known_qlf == AARCH64_OPND_NIL)
655     {
656       assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
657       return qseq_list[0][idx];
658     }
659 
660   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
661     {
662       if (qseq_list[i][known_idx] == known_qlf)
663 	{
664 	  if (saved_i != -1)
665 	    /* More than one sequences are found to have KNOWN_QLF at
666 	       KNOWN_IDX.  */
667 	    return AARCH64_OPND_NIL;
668 	  saved_i = i;
669 	}
670     }
671 
672   return qseq_list[saved_i][idx];
673 }
674 
/* The kind of an operand qualifier; determines how the three data
   fields of struct operand_qualifier_data below are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.
     For OQK_OPD_VARIANT: element size, number of elements, common
     encoding value.  For OQK_VALUE_IN_RANGE: lower bound, upper bound,
     unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
695 
/* Indexed by the operand qualifier enumerators; the entry order must
   match that enumeration.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use 0 (== OQK_NIL) rather than OQK_MISC
     for the kind field; no visible code distinguishes the two, but
     confirm before relying on .kind for these entries.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
757 
758 static inline bfd_boolean
759 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
760 {
761   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
762     ? TRUE : FALSE;
763 }
764 
765 static inline bfd_boolean
766 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
767 {
768   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
769     ? TRUE : FALSE;
770 }
771 
/* Return the printable name of QUALIFIER (e.g. "4s", "imm_0_31").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
777 
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  QUALIFIER must be an operand
   variant qualifier.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
786 
/* Return the number of data elements implied by QUALIFIER (e.g. 4 for
   "4s").  QUALIFIER must be an operand variant qualifier.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
793 
/* Return the common encoding value recorded for QUALIFIER, used for
   table-driven encoding/decoding.  QUALIFIER must be an operand variant
   qualifier.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
800 
/* Return the inclusive lower bound of the value range expressed by
   QUALIFIER, which must be a value-in-range qualifier.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
807 
/* Return the inclusive upper bound of the value range expressed by
   QUALIFIER, which must be a value-in-range qualifier.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
814 
815 #ifdef DEBUG_AARCH64
/* printf-style debug helper: emit STR (formatted) on a line prefixed
   with "#### " to stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
826 
/* Debug helper: print the AARCH64_MAX_OPND_NUM qualifier names of the
   sequence starting at QUALIFIER as one comma-separated "#### " line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
836 
/* Debug helper: print the qualifiers currently held by the operands in
   OPND alongside the candidate sequence QUALIFIER being matched.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
851 #endif /* DEBUG_AARCH64 */
852 
853 /* This function checks if the given instruction INSN is a destructive
854    instruction based on the usage of the registers.  It does not recognize
855    unary destructive instructions.  */
856 bfd_boolean
857 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
858 {
859   int i = 0;
860   const enum aarch64_opnd *opnds = opcode->operands;
861 
862   if (opnds[0] == AARCH64_OPND_NIL)
863     return FALSE;
864 
865   while (opnds[++i] != AARCH64_OPND_NIL)
866     if (opnds[i] == opnds[0])
867       return TRUE;
868 
869   return FALSE;
870 }
871 
872 /* TODO improve this, we can have an extra field at the runtime to
873    store the number of operands rather than calculating it every time.  */
874 
875 int
876 aarch64_num_of_operands (const aarch64_opcode *opcode)
877 {
878   int i = 0;
879   const enum aarch64_opnd *opnds = opcode->operands;
880   while (opnds[i++] != AARCH64_OPND_NIL)
881     ;
882   --i;
883   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
884   return i;
885 }
886 
887 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
888    If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
889 
890    N.B. on the entry, it is very likely that only some operands in *INST
891    have had their qualifiers been established.
892 
893    If STOP_AT is not -1, the function will only try to match
894    the qualifier sequence for operands before and including the operand
895    of index STOP_AT; and on success *RET will only be filled with the first
896    (STOP_AT+1) qualifiers.
897 
898    A couple examples of the matching algorithm:
899 
900    X,W,NIL should match
901    X,W,NIL
902 
903    NIL,NIL should match
904    X  ,NIL
905 
906    Apart from serving the main encoding routine, this can also be called
907    during or after the operand decoding.  */
908 
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Normalize STOP_AT: -1 (or any out-of-range value) means "match all
     operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.   */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An empty sequence only counts as a match when it is the very
	     first entry; reaching it later means no real pattern matched.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      /* QUALIFIERS_LIST has been advanced in lockstep with I above, so at
	 this point it refers to the sequence that matched.  */
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT, then pad the remainder
	 of *RET with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1011 
1012 /* Operand qualifier matching and resolving.
1013 
1014    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1015    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1016 
1017    if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1018    succeeds.  */
1019 
1020 static int
1021 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1022 {
1023   int i, nops;
1024   aarch64_opnd_qualifier_seq_t qualifiers;
1025 
1026   if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1027 			       qualifiers))
1028     {
1029       DEBUG_TRACE ("matching FAIL");
1030       return 0;
1031     }
1032 
1033   if (inst->opcode->flags & F_STRICT)
1034     {
1035       /* Require an exact qualifier match, even for NIL qualifiers.  */
1036       nops = aarch64_num_of_operands (inst->opcode);
1037       for (i = 0; i < nops; ++i)
1038 	if (inst->operands[i].qualifier != qualifiers[i])
1039 	  return FALSE;
1040     }
1041 
1042   /* Update the qualifiers.  */
1043   if (update_p == TRUE)
1044     for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1045       {
1046 	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1047 	  break;
1048 	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1049 			"update %s with %s for operand %d",
1050 			aarch64_get_qualifier_name (inst->operands[i].qualifier),
1051 			aarch64_get_qualifier_name (qualifiers[i]), i);
1052 	inst->operands[i].qualifier = qualifiers[i];
1053       }
1054 
1055   DEBUG_TRACE ("matching SUCCESS");
1056   return 1;
1057 }
1058 
1059 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1060    register by MOVZ.
1061 
1062    IS32 indicates whether value is a 32-bit immediate or not.
1063    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1064    amount will be returned in *SHIFT_AMOUNT.  */
1065 
1066 bfd_boolean
1067 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1068 {
1069   int amount;
1070 
1071   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1072 
1073   if (is32)
1074     {
1075       /* Allow all zeros or all ones in top 32-bits, so that
1076 	 32-bit constant expressions like ~0x80000000 are
1077 	 permitted.  */
1078       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1079 	/* Immediate out of range.  */
1080 	return FALSE;
1081       value &= 0xffffffff;
1082     }
1083 
1084   /* first, try movz then movn */
1085   amount = -1;
1086   if ((value & ((uint64_t) 0xffff << 0)) == value)
1087     amount = 0;
1088   else if ((value & ((uint64_t) 0xffff << 16)) == value)
1089     amount = 16;
1090   else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1091     amount = 32;
1092   else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1093     amount = 48;
1094 
1095   if (amount == -1)
1096     {
1097       DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1098       return FALSE;
1099     }
1100 
1101   if (shift_amount != NULL)
1102     *shift_amount = amount;
1103 
1104   DEBUG_TRACE ("exit TRUE with amount %d", amount);
1105 
1106   return TRUE;
1107 }
1108 
1109 /* Build the accepted values for immediate logical SIMD instructions.
1110 
1111    The standard encodings of the immediate value are:
1112      N      imms     immr         SIMD size  R             S
1113      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
1114      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
1115      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
1116      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
1117      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
1118      0      11110s   00000r       2       UInt(r)       UInt(s)
1119    where all-ones value of S is reserved.
1120 
1121    Let's call E the SIMD size.
1122 
1123    The immediate value is: S+1 bits '1' rotated to the right by R.
1124 
1125    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1126    (remember S != E - 1).  */
1127 
1128 #define TOTAL_IMM_NB  5334
1129 
/* One table entry per valid logical immediate: the replicated 64-bit
   value and its 13-bit N:immr:imms standard encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid immediates; built lazily by
   build_immediate_table and kept sorted by IMM for bsearch.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1137 
1138 static int
1139 simd_imm_encoding_cmp(const void *i1, const void *i2)
1140 {
1141   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1142   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1143 
1144   if (imm1->imm < imm2->imm)
1145     return -1;
1146   if (imm1->imm > imm2->imm)
1147     return +1;
1148   return 0;
1149 }
1150 
1151 /* immediate bitfield standard encoding
1152    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
1153    1         ssssss     rrrrrr      64        rrrrrr ssssss
1154    0         0sssss     0rrrrr      32        rrrrr  sssss
1155    0         10ssss     00rrrr      16        rrrr   ssss
1156    0         110sss     000rrr      8         rrr    sss
1157    0         1110ss     0000rr      4         rr     ss
1158    0         11110s     00000r      2         r      s  */
/* Pack the logical-immediate fields into the 13-bit N:immr:imms layout:
   bit 12 = IS64 (N), bits 11:6 = R (immr), bits 5:0 = S (imms).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1164 
/* Fill simd_immediates[] with every valid logical-immediate value and its
   standard encoding, then sort the table by value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    /* S | S_MASK merges the element-size marker bits into the
	       imms field, per the encoding table above.  */
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1230 
1231 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1232    be accepted by logical (immediate) instructions
1233    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1234 
1235    ESIZE is the number of bytes in the decoded immediate value.
1236    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1237    VALUE will be returned in *ENCODING.  */
1238 
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* The lookup table is built once, on first use.  NOTE(review): this
     lazy init is not thread-safe — assumed to run in a single-threaded
     (dis)assembler context; confirm if that changes.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* Shifting twice by ESIZE*4 instead of once by ESIZE*8 avoids the
     undefined behaviour of a 64-bit shift when ESIZE is 8 (UPPER then
     correctly becomes 0).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* The table stores the replicated 64-bit form, so a single bsearch
     suffices for every element size.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1282 
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      if (byte == 0xff)
	result |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-zeros nor all-ones: not shrinkable.  */
	return -1;
    }
  return result;
}
1304 
1305 /* Utility inline functions for operand_general_constraint_met_p.  */
1306 
1307 static inline void
1308 set_error (aarch64_operand_error *mismatch_detail,
1309 	   enum aarch64_operand_error_kind kind, int idx,
1310 	   const char* error)
1311 {
1312   if (mismatch_detail == NULL)
1313     return;
1314   mismatch_detail->kind = kind;
1315   mismatch_detail->index = idx;
1316   mismatch_detail->error = error;
1317 }
1318 
1319 static inline void
1320 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1321 		  const char* error)
1322 {
1323   if (mismatch_detail == NULL)
1324     return;
1325   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1326 }
1327 
1328 static inline void
1329 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1330 			int idx, int lower_bound, int upper_bound,
1331 			const char* error)
1332 {
1333   if (mismatch_detail == NULL)
1334     return;
1335   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1336   mismatch_detail->data[0] = lower_bound;
1337   mismatch_detail->data[1] = upper_bound;
1338 }
1339 
1340 static inline void
1341 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1342 			    int idx, int lower_bound, int upper_bound)
1343 {
1344   if (mismatch_detail == NULL)
1345     return;
1346   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1347 			  _("immediate value"));
1348 }
1349 
1350 static inline void
1351 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1352 			       int idx, int lower_bound, int upper_bound)
1353 {
1354   if (mismatch_detail == NULL)
1355     return;
1356   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1357 			  _("immediate offset"));
1358 }
1359 
1360 static inline void
1361 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1362 			      int idx, int lower_bound, int upper_bound)
1363 {
1364   if (mismatch_detail == NULL)
1365     return;
1366   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1367 			  _("register number"));
1368 }
1369 
1370 static inline void
1371 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1372 				 int idx, int lower_bound, int upper_bound)
1373 {
1374   if (mismatch_detail == NULL)
1375     return;
1376   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1377 			  _("register element index"));
1378 }
1379 
1380 static inline void
1381 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1382 				   int idx, int lower_bound, int upper_bound)
1383 {
1384   if (mismatch_detail == NULL)
1385     return;
1386   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1387 			  _("shift amount"));
1388 }
1389 
1390 /* Report that the MUL modifier in operand IDX should be in the range
1391    [LOWER_BOUND, UPPER_BOUND].  */
1392 static inline void
1393 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1394 				   int idx, int lower_bound, int upper_bound)
1395 {
1396   if (mismatch_detail == NULL)
1397     return;
1398   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1399 			  _("multiplier"));
1400 }
1401 
1402 static inline void
1403 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1404 		     int alignment)
1405 {
1406   if (mismatch_detail == NULL)
1407     return;
1408   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1409   mismatch_detail->data[0] = alignment;
1410 }
1411 
1412 static inline void
1413 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1414 		    int expected_num)
1415 {
1416   if (mismatch_detail == NULL)
1417     return;
1418   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1419   mismatch_detail->data[0] = expected_num;
1420 }
1421 
1422 static inline void
1423 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1424 		 const char* error)
1425 {
1426   if (mismatch_detail == NULL)
1427     return;
1428   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1429 }
1430 
1431 /* General constraint checking based on operand code.
1432 
1433    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1434    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1435 
1436    This function has to be called after the qualifiers for all operands
1437    have been resolved.
1438 
1439    Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1440    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
1441    of error message during the disassembling where error message is not
1442    wanted.  We avoid the dynamic construction of strings of error messages
1443    here (i.e. in libopcodes), as it is costly and complicated; instead, we
1444    use a combination of error code, static string and some integer data to
1445    represent an error.  */
1446 
1447 static int
1448 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1449 				  enum aarch64_opnd type,
1450 				  const aarch64_opcode *opcode,
1451 				  aarch64_operand_error *mismatch_detail)
1452 {
1453   unsigned num, modifiers, shift;
1454   unsigned char size;
1455   int64_t imm, min_value, max_value;
1456   uint64_t uvalue, mask;
1457   const aarch64_opnd_info *opnd = opnds + idx;
1458   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1459 
1460   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1461 
1462   switch (aarch64_operands[type].op_class)
1463     {
1464     case AARCH64_OPND_CLASS_INT_REG:
1465       /* Check pair reg constraints for cas* instructions.  */
1466       if (type == AARCH64_OPND_PAIRREG)
1467 	{
1468 	  assert (idx == 1 || idx == 3);
1469 	  if (opnds[idx - 1].reg.regno % 2 != 0)
1470 	    {
1471 	      set_syntax_error (mismatch_detail, idx - 1,
1472 				_("reg pair must start from even reg"));
1473 	      return 0;
1474 	    }
1475 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1476 	    {
1477 	      set_syntax_error (mismatch_detail, idx,
1478 				_("reg pair must be contiguous"));
1479 	      return 0;
1480 	    }
1481 	  break;
1482 	}
1483 
1484       /* <Xt> may be optional in some IC and TLBI instructions.  */
1485       if (type == AARCH64_OPND_Rt_SYS)
1486 	{
1487 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1488 			       == AARCH64_OPND_CLASS_SYSTEM));
1489 	  if (opnds[1].present
1490 	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1491 	    {
1492 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1493 	      return 0;
1494 	    }
1495 	  if (!opnds[1].present
1496 	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1497 	    {
1498 	      set_other_error (mismatch_detail, idx, _("missing register"));
1499 	      return 0;
1500 	    }
1501 	}
1502       switch (qualifier)
1503 	{
1504 	case AARCH64_OPND_QLF_WSP:
1505 	case AARCH64_OPND_QLF_SP:
1506 	  if (!aarch64_stack_pointer_p (opnd))
1507 	    {
1508 	      set_other_error (mismatch_detail, idx,
1509 			       _("stack pointer register expected"));
1510 	      return 0;
1511 	    }
1512 	  break;
1513 	default:
1514 	  break;
1515 	}
1516       break;
1517 
1518     case AARCH64_OPND_CLASS_SVE_REG:
1519       switch (type)
1520 	{
1521 	case AARCH64_OPND_SVE_Zm3_INDEX:
1522 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
1523 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
1524 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
1525 	case AARCH64_OPND_SVE_Zm4_INDEX:
1526 	  size = get_operand_fields_width (get_operand_from_code (type));
1527 	  shift = get_operand_specific_data (&aarch64_operands[type]);
1528 	  mask = (1 << shift) - 1;
1529 	  if (opnd->reg.regno > mask)
1530 	    {
1531 	      assert (mask == 7 || mask == 15);
1532 	      set_other_error (mismatch_detail, idx,
1533 			       mask == 15
1534 			       ? _("z0-z15 expected")
1535 			       : _("z0-z7 expected"));
1536 	      return 0;
1537 	    }
1538 	  mask = (1u << (size - shift)) - 1;
1539 	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
1540 	    {
1541 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1542 	      return 0;
1543 	    }
1544 	  break;
1545 
1546 	case AARCH64_OPND_SVE_Zn_INDEX:
1547 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1548 	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1549 	    {
1550 	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
1551 					       0, 64 / size - 1);
1552 	      return 0;
1553 	    }
1554 	  break;
1555 
1556 	case AARCH64_OPND_SVE_ZnxN:
1557 	case AARCH64_OPND_SVE_ZtxN:
1558 	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1559 	    {
1560 	      set_other_error (mismatch_detail, idx,
1561 			       _("invalid register list"));
1562 	      return 0;
1563 	    }
1564 	  break;
1565 
1566 	default:
1567 	  break;
1568 	}
1569       break;
1570 
1571     case AARCH64_OPND_CLASS_PRED_REG:
1572       if (opnd->reg.regno >= 8
1573 	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
1574 	{
1575 	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1576 	  return 0;
1577 	}
1578       break;
1579 
1580     case AARCH64_OPND_CLASS_COND:
1581       if (type == AARCH64_OPND_COND1
1582 	  && (opnds[idx].cond->value & 0xe) == 0xe)
1583 	{
1584 	  /* Not allow AL or NV.  */
1585 	  set_syntax_error (mismatch_detail, idx, NULL);
1586 	}
1587       break;
1588 
1589     case AARCH64_OPND_CLASS_ADDRESS:
1590       /* Check writeback.  */
1591       switch (opcode->iclass)
1592 	{
1593 	case ldst_pos:
1594 	case ldst_unscaled:
1595 	case ldstnapair_offs:
1596 	case ldstpair_off:
1597 	case ldst_unpriv:
1598 	  if (opnd->addr.writeback == 1)
1599 	    {
1600 	      set_syntax_error (mismatch_detail, idx,
1601 				_("unexpected address writeback"));
1602 	      return 0;
1603 	    }
1604 	  break;
1605 	case ldst_imm10:
1606 	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1607 	    {
1608 	      set_syntax_error (mismatch_detail, idx,
1609 				_("unexpected address writeback"));
1610 	      return 0;
1611 	    }
1612 	  break;
1613 	case ldst_imm9:
1614 	case ldstpair_indexed:
1615 	case asisdlsep:
1616 	case asisdlsop:
1617 	  if (opnd->addr.writeback == 0)
1618 	    {
1619 	      set_syntax_error (mismatch_detail, idx,
1620 				_("address writeback expected"));
1621 	      return 0;
1622 	    }
1623 	  break;
1624 	default:
1625 	  assert (opnd->addr.writeback == 0);
1626 	  break;
1627 	}
1628       switch (type)
1629 	{
1630 	case AARCH64_OPND_ADDR_SIMM7:
1631 	  /* Scaled signed 7 bits immediate offset.  */
1632 	  /* Get the size of the data element that is accessed, which may be
1633 	     different from that of the source register size,
1634 	     e.g. in strb/ldrb.  */
1635 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1636 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1637 	    {
1638 	      set_offset_out_of_range_error (mismatch_detail, idx,
1639 					     -64 * size, 63 * size);
1640 	      return 0;
1641 	    }
1642 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1643 	    {
1644 	      set_unaligned_error (mismatch_detail, idx, size);
1645 	      return 0;
1646 	    }
1647 	  break;
1648 	case AARCH64_OPND_ADDR_OFFSET:
1649 	case AARCH64_OPND_ADDR_SIMM9:
1650 	  /* Unscaled signed 9 bits immediate offset.  */
1651 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1652 	    {
1653 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1654 	      return 0;
1655 	    }
1656 	  break;
1657 
1658 	case AARCH64_OPND_ADDR_SIMM9_2:
1659 	  /* Unscaled signed 9 bits immediate offset, which has to be negative
1660 	     or unaligned.  */
1661 	  size = aarch64_get_qualifier_esize (qualifier);
1662 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1663 	       && !value_aligned_p (opnd->addr.offset.imm, size))
1664 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1665 	    return 1;
1666 	  set_other_error (mismatch_detail, idx,
1667 			   _("negative or unaligned offset expected"));
1668 	  return 0;
1669 
1670 	case AARCH64_OPND_ADDR_SIMM10:
1671 	  /* Scaled signed 10 bits immediate offset.  */
1672 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1673 	    {
1674 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1675 	      return 0;
1676 	    }
1677 	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
1678 	    {
1679 	      set_unaligned_error (mismatch_detail, idx, 8);
1680 	      return 0;
1681 	    }
1682 	  break;
1683 
1684 	case AARCH64_OPND_ADDR_SIMM11:
1685 	  /* Signed 11 bits immediate offset (multiple of 16).  */
1686 	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1687 	    {
1688 	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1689 	      return 0;
1690 	    }
1691 
1692 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1693 	    {
1694 	      set_unaligned_error (mismatch_detail, idx, 16);
1695 	      return 0;
1696 	    }
1697 	  break;
1698 
1699 	case AARCH64_OPND_ADDR_SIMM13:
1700 	  /* Signed 13 bits immediate offset (multiple of 16).  */
1701 	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1702 	    {
1703 	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1704 	      return 0;
1705 	    }
1706 
1707 	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1708 	    {
1709 	      set_unaligned_error (mismatch_detail, idx, 16);
1710 	      return 0;
1711 	    }
1712 	  break;
1713 
1714 	case AARCH64_OPND_SIMD_ADDR_POST:
1715 	  /* AdvSIMD load/store multiple structures, post-index.  */
1716 	  assert (idx == 1);
1717 	  if (opnd->addr.offset.is_reg)
1718 	    {
1719 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1720 		return 1;
1721 	      else
1722 		{
1723 		  set_other_error (mismatch_detail, idx,
1724 				   _("invalid register offset"));
1725 		  return 0;
1726 		}
1727 	    }
1728 	  else
1729 	    {
1730 	      const aarch64_opnd_info *prev = &opnds[idx-1];
1731 	      unsigned num_bytes; /* total number of bytes transferred.  */
1732 	      /* The opcode dependent area stores the number of elements in
1733 		 each structure to be loaded/stored.  */
1734 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1735 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1736 		/* Special handling of loading single structure to all lane.  */
1737 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1738 		  * aarch64_get_qualifier_esize (prev->qualifier);
1739 	      else
1740 		num_bytes = prev->reglist.num_regs
1741 		  * aarch64_get_qualifier_esize (prev->qualifier)
1742 		  * aarch64_get_qualifier_nelem (prev->qualifier);
1743 	      if ((int) num_bytes != opnd->addr.offset.imm)
1744 		{
1745 		  set_other_error (mismatch_detail, idx,
1746 				   _("invalid post-increment amount"));
1747 		  return 0;
1748 		}
1749 	    }
1750 	  break;
1751 
1752 	case AARCH64_OPND_ADDR_REGOFF:
1753 	  /* Get the size of the data element that is accessed, which may be
1754 	     different from that of the source register size,
1755 	     e.g. in strb/ldrb.  */
1756 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1757 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1758 	  if (opnd->shifter.amount != 0
1759 	      && opnd->shifter.amount != (int)get_logsz (size))
1760 	    {
1761 	      set_other_error (mismatch_detail, idx,
1762 			       _("invalid shift amount"));
1763 	      return 0;
1764 	    }
1765 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1766 	     operators.  */
1767 	  switch (opnd->shifter.kind)
1768 	    {
1769 	    case AARCH64_MOD_UXTW:
1770 	    case AARCH64_MOD_LSL:
1771 	    case AARCH64_MOD_SXTW:
1772 	    case AARCH64_MOD_SXTX: break;
1773 	    default:
1774 	      set_other_error (mismatch_detail, idx,
1775 			       _("invalid extend/shift operator"));
1776 	      return 0;
1777 	    }
1778 	  break;
1779 
1780 	case AARCH64_OPND_ADDR_UIMM12:
1781 	  imm = opnd->addr.offset.imm;
1782 	  /* Get the size of the data element that is accessed, which may be
1783 	     different from that of the source register size,
1784 	     e.g. in strb/ldrb.  */
1785 	  size = aarch64_get_qualifier_esize (qualifier);
1786 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1787 	    {
1788 	      set_offset_out_of_range_error (mismatch_detail, idx,
1789 					     0, 4095 * size);
1790 	      return 0;
1791 	    }
1792 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1793 	    {
1794 	      set_unaligned_error (mismatch_detail, idx, size);
1795 	      return 0;
1796 	    }
1797 	  break;
1798 
1799 	case AARCH64_OPND_ADDR_PCREL14:
1800 	case AARCH64_OPND_ADDR_PCREL19:
1801 	case AARCH64_OPND_ADDR_PCREL21:
1802 	case AARCH64_OPND_ADDR_PCREL26:
1803 	  imm = opnd->imm.value;
1804 	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1805 	    {
1806 	      /* The offset value in a PC-relative branch instruction is alway
1807 		 4-byte aligned and is encoded without the lowest 2 bits.  */
1808 	      if (!value_aligned_p (imm, 4))
1809 		{
1810 		  set_unaligned_error (mismatch_detail, idx, 4);
1811 		  return 0;
1812 		}
1813 	      /* Right shift by 2 so that we can carry out the following check
1814 		 canonically.  */
1815 	      imm >>= 2;
1816 	    }
1817 	  size = get_operand_fields_width (get_operand_from_code (type));
1818 	  if (!value_fit_signed_field_p (imm, size))
1819 	    {
1820 	      set_other_error (mismatch_detail, idx,
1821 			       _("immediate out of range"));
1822 	      return 0;
1823 	    }
1824 	  break;
1825 
1826 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1827 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1828 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1829 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1830 	  min_value = -8;
1831 	  max_value = 7;
1832 	sve_imm_offset_vl:
1833 	  assert (!opnd->addr.offset.is_reg);
1834 	  assert (opnd->addr.preind);
1835 	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1836 	  min_value *= num;
1837 	  max_value *= num;
1838 	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1839 	      || (opnd->shifter.operator_present
1840 		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1841 	    {
1842 	      set_other_error (mismatch_detail, idx,
1843 			       _("invalid addressing mode"));
1844 	      return 0;
1845 	    }
1846 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1847 	    {
1848 	      set_offset_out_of_range_error (mismatch_detail, idx,
1849 					     min_value, max_value);
1850 	      return 0;
1851 	    }
1852 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1853 	    {
1854 	      set_unaligned_error (mismatch_detail, idx, num);
1855 	      return 0;
1856 	    }
1857 	  break;
1858 
1859 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1860 	  min_value = -32;
1861 	  max_value = 31;
1862 	  goto sve_imm_offset_vl;
1863 
1864 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1865 	  min_value = -256;
1866 	  max_value = 255;
1867 	  goto sve_imm_offset_vl;
1868 
1869 	case AARCH64_OPND_SVE_ADDR_RI_U6:
1870 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1871 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1872 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1873 	  min_value = 0;
1874 	  max_value = 63;
1875 	sve_imm_offset:
1876 	  assert (!opnd->addr.offset.is_reg);
1877 	  assert (opnd->addr.preind);
1878 	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1879 	  min_value *= num;
1880 	  max_value *= num;
1881 	  if (opnd->shifter.operator_present
1882 	      || opnd->shifter.amount_present)
1883 	    {
1884 	      set_other_error (mismatch_detail, idx,
1885 			       _("invalid addressing mode"));
1886 	      return 0;
1887 	    }
1888 	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1889 	    {
1890 	      set_offset_out_of_range_error (mismatch_detail, idx,
1891 					     min_value, max_value);
1892 	      return 0;
1893 	    }
1894 	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1895 	    {
1896 	      set_unaligned_error (mismatch_detail, idx, num);
1897 	      return 0;
1898 	    }
1899 	  break;
1900 
1901 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1902 	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1903 	  min_value = -8;
1904 	  max_value = 7;
1905 	  goto sve_imm_offset;
1906 
1907 	case AARCH64_OPND_SVE_ADDR_ZX:
1908 	  /* Everything is already ensured by parse_operands or
1909 	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1910 	     argument type).  */
1911 	  assert (opnd->addr.offset.is_reg);
1912 	  assert (opnd->addr.preind);
1913 	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1914 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1915 	  assert (opnd->shifter.operator_present == 0);
1916 	  break;
1917 
1918 	case AARCH64_OPND_SVE_ADDR_R:
1919 	case AARCH64_OPND_SVE_ADDR_RR:
1920 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1921 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1922 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1923 	case AARCH64_OPND_SVE_ADDR_RX:
1924 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1925 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1926 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1927 	case AARCH64_OPND_SVE_ADDR_RZ:
1928 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1929 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1930 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1931 	  modifiers = 1 << AARCH64_MOD_LSL;
1932 	sve_rr_operand:
1933 	  assert (opnd->addr.offset.is_reg);
1934 	  assert (opnd->addr.preind);
1935 	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1936 	      && opnd->addr.offset.regno == 31)
1937 	    {
1938 	      set_other_error (mismatch_detail, idx,
1939 			       _("index register xzr is not allowed"));
1940 	      return 0;
1941 	    }
1942 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1943 	      || (opnd->shifter.amount
1944 		  != get_operand_specific_data (&aarch64_operands[type])))
1945 	    {
1946 	      set_other_error (mismatch_detail, idx,
1947 			       _("invalid addressing mode"));
1948 	      return 0;
1949 	    }
1950 	  break;
1951 
1952 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1953 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1954 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1955 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1956 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1957 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1958 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1959 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1960 	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1961 	  goto sve_rr_operand;
1962 
1963 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
1964 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1965 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1966 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1967 	  min_value = 0;
1968 	  max_value = 31;
1969 	  goto sve_imm_offset;
1970 
1971 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1972 	  modifiers = 1 << AARCH64_MOD_LSL;
1973 	sve_zz_operand:
1974 	  assert (opnd->addr.offset.is_reg);
1975 	  assert (opnd->addr.preind);
1976 	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1977 	      || opnd->shifter.amount < 0
1978 	      || opnd->shifter.amount > 3)
1979 	    {
1980 	      set_other_error (mismatch_detail, idx,
1981 			       _("invalid addressing mode"));
1982 	      return 0;
1983 	    }
1984 	  break;
1985 
1986 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1987 	  modifiers = (1 << AARCH64_MOD_SXTW);
1988 	  goto sve_zz_operand;
1989 
1990 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1991 	  modifiers = 1 << AARCH64_MOD_UXTW;
1992 	  goto sve_zz_operand;
1993 
1994 	default:
1995 	  break;
1996 	}
1997       break;
1998 
1999     case AARCH64_OPND_CLASS_SIMD_REGLIST:
2000       if (type == AARCH64_OPND_LEt)
2001 	{
2002 	  /* Get the upper bound for the element index.  */
2003 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2004 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
2005 	    {
2006 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2007 	      return 0;
2008 	    }
2009 	}
2010       /* The opcode dependent area stores the number of elements in
2011 	 each structure to be loaded/stored.  */
2012       num = get_opcode_dependent_value (opcode);
2013       switch (type)
2014 	{
2015 	case AARCH64_OPND_LVt:
2016 	  assert (num >= 1 && num <= 4);
2017 	  /* Unless LD1/ST1, the number of registers should be equal to that
2018 	     of the structure elements.  */
2019 	  if (num != 1 && opnd->reglist.num_regs != num)
2020 	    {
2021 	      set_reg_list_error (mismatch_detail, idx, num);
2022 	      return 0;
2023 	    }
2024 	  break;
2025 	case AARCH64_OPND_LVt_AL:
2026 	case AARCH64_OPND_LEt:
2027 	  assert (num >= 1 && num <= 4);
2028 	  /* The number of registers should be equal to that of the structure
2029 	     elements.  */
2030 	  if (opnd->reglist.num_regs != num)
2031 	    {
2032 	      set_reg_list_error (mismatch_detail, idx, num);
2033 	      return 0;
2034 	    }
2035 	  break;
2036 	default:
2037 	  break;
2038 	}
2039       break;
2040 
2041     case AARCH64_OPND_CLASS_IMMEDIATE:
2042       /* Constraint check on immediate operand.  */
2043       imm = opnd->imm.value;
2044       /* E.g. imm_0_31 constrains value to be 0..31.  */
2045       if (qualifier_value_in_range_constraint_p (qualifier)
2046 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
2047 				get_upper_bound (qualifier)))
2048 	{
2049 	  set_imm_out_of_range_error (mismatch_detail, idx,
2050 				      get_lower_bound (qualifier),
2051 				      get_upper_bound (qualifier));
2052 	  return 0;
2053 	}
2054 
2055       switch (type)
2056 	{
2057 	case AARCH64_OPND_AIMM:
2058 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2059 	    {
2060 	      set_other_error (mismatch_detail, idx,
2061 			       _("invalid shift operator"));
2062 	      return 0;
2063 	    }
2064 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2065 	    {
2066 	      set_other_error (mismatch_detail, idx,
2067 			       _("shift amount must be 0 or 12"));
2068 	      return 0;
2069 	    }
2070 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2071 	    {
2072 	      set_other_error (mismatch_detail, idx,
2073 			       _("immediate out of range"));
2074 	      return 0;
2075 	    }
2076 	  break;
2077 
2078 	case AARCH64_OPND_HALF:
2079 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2080 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2081 	    {
2082 	      set_other_error (mismatch_detail, idx,
2083 			       _("invalid shift operator"));
2084 	      return 0;
2085 	    }
2086 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2087 	  if (!value_aligned_p (opnd->shifter.amount, 16))
2088 	    {
2089 	      set_other_error (mismatch_detail, idx,
2090 			       _("shift amount must be a multiple of 16"));
2091 	      return 0;
2092 	    }
2093 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2094 	    {
2095 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
2096 						 0, size * 8 - 16);
2097 	      return 0;
2098 	    }
2099 	  if (opnd->imm.value < 0)
2100 	    {
2101 	      set_other_error (mismatch_detail, idx,
2102 			       _("negative immediate value not allowed"));
2103 	      return 0;
2104 	    }
2105 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2106 	    {
2107 	      set_other_error (mismatch_detail, idx,
2108 			       _("immediate out of range"));
2109 	      return 0;
2110 	    }
2111 	  break;
2112 
2113 	case AARCH64_OPND_IMM_MOV:
2114 	    {
2115 	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2116 	      imm = opnd->imm.value;
2117 	      assert (idx == 1);
2118 	      switch (opcode->op)
2119 		{
2120 		case OP_MOV_IMM_WIDEN:
2121 		  imm = ~imm;
2122 		  /* Fall through.  */
2123 		case OP_MOV_IMM_WIDE:
2124 		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2125 		    {
2126 		      set_other_error (mismatch_detail, idx,
2127 				       _("immediate out of range"));
2128 		      return 0;
2129 		    }
2130 		  break;
2131 		case OP_MOV_IMM_LOG:
2132 		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
2133 		    {
2134 		      set_other_error (mismatch_detail, idx,
2135 				       _("immediate out of range"));
2136 		      return 0;
2137 		    }
2138 		  break;
2139 		default:
2140 		  assert (0);
2141 		  return 0;
2142 		}
2143 	    }
2144 	  break;
2145 
2146 	case AARCH64_OPND_NZCV:
2147 	case AARCH64_OPND_CCMP_IMM:
2148 	case AARCH64_OPND_EXCEPTION:
2149 	case AARCH64_OPND_UNDEFINED:
2150 	case AARCH64_OPND_TME_UIMM16:
2151 	case AARCH64_OPND_UIMM4:
2152 	case AARCH64_OPND_UIMM4_ADDG:
2153 	case AARCH64_OPND_UIMM7:
2154 	case AARCH64_OPND_UIMM3_OP1:
2155 	case AARCH64_OPND_UIMM3_OP2:
2156 	case AARCH64_OPND_SVE_UIMM3:
2157 	case AARCH64_OPND_SVE_UIMM7:
2158 	case AARCH64_OPND_SVE_UIMM8:
2159 	case AARCH64_OPND_SVE_UIMM8_53:
2160 	  size = get_operand_fields_width (get_operand_from_code (type));
2161 	  assert (size < 32);
2162 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2163 	    {
2164 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2165 					  (1u << size) - 1);
2166 	      return 0;
2167 	    }
2168 	  break;
2169 
2170 	case AARCH64_OPND_UIMM10:
2171 	  /* Scaled unsigned 10 bits immediate offset.  */
2172 	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
2173 	    {
2174 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2175 	      return 0;
2176 	    }
2177 
2178 	  if (!value_aligned_p (opnd->imm.value, 16))
2179 	    {
2180 	      set_unaligned_error (mismatch_detail, idx, 16);
2181 	      return 0;
2182 	    }
2183 	  break;
2184 
2185 	case AARCH64_OPND_SIMM5:
2186 	case AARCH64_OPND_SVE_SIMM5:
2187 	case AARCH64_OPND_SVE_SIMM5B:
2188 	case AARCH64_OPND_SVE_SIMM6:
2189 	case AARCH64_OPND_SVE_SIMM8:
2190 	  size = get_operand_fields_width (get_operand_from_code (type));
2191 	  assert (size < 32);
2192 	  if (!value_fit_signed_field_p (opnd->imm.value, size))
2193 	    {
2194 	      set_imm_out_of_range_error (mismatch_detail, idx,
2195 					  -(1 << (size - 1)),
2196 					  (1 << (size - 1)) - 1);
2197 	      return 0;
2198 	    }
2199 	  break;
2200 
2201 	case AARCH64_OPND_WIDTH:
2202 	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2203 		  && opnds[0].type == AARCH64_OPND_Rd);
2204 	  size = get_upper_bound (qualifier);
2205 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
2206 	    /* lsb+width <= reg.size  */
2207 	    {
2208 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
2209 					  size - opnds[idx-1].imm.value);
2210 	      return 0;
2211 	    }
2212 	  break;
2213 
2214 	case AARCH64_OPND_LIMM:
2215 	case AARCH64_OPND_SVE_LIMM:
2216 	  {
2217 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2218 	    uint64_t uimm = opnd->imm.value;
2219 	    if (opcode->op == OP_BIC)
2220 	      uimm = ~uimm;
2221 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2222 	      {
2223 		set_other_error (mismatch_detail, idx,
2224 				 _("immediate out of range"));
2225 		return 0;
2226 	      }
2227 	  }
2228 	  break;
2229 
2230 	case AARCH64_OPND_IMM0:
2231 	case AARCH64_OPND_FPIMM0:
2232 	  if (opnd->imm.value != 0)
2233 	    {
2234 	      set_other_error (mismatch_detail, idx,
2235 			       _("immediate zero expected"));
2236 	      return 0;
2237 	    }
2238 	  break;
2239 
2240 	case AARCH64_OPND_IMM_ROT1:
2241 	case AARCH64_OPND_IMM_ROT2:
2242 	case AARCH64_OPND_SVE_IMM_ROT2:
2243 	  if (opnd->imm.value != 0
2244 	      && opnd->imm.value != 90
2245 	      && opnd->imm.value != 180
2246 	      && opnd->imm.value != 270)
2247 	    {
2248 	      set_other_error (mismatch_detail, idx,
2249 			       _("rotate expected to be 0, 90, 180 or 270"));
2250 	      return 0;
2251 	    }
2252 	  break;
2253 
2254 	case AARCH64_OPND_IMM_ROT3:
2255 	case AARCH64_OPND_SVE_IMM_ROT1:
2256 	case AARCH64_OPND_SVE_IMM_ROT3:
2257 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
2258 	    {
2259 	      set_other_error (mismatch_detail, idx,
2260 			       _("rotate expected to be 90 or 270"));
2261 	      return 0;
2262 	    }
2263 	  break;
2264 
2265 	case AARCH64_OPND_SHLL_IMM:
2266 	  assert (idx == 2);
2267 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2268 	  if (opnd->imm.value != size)
2269 	    {
2270 	      set_other_error (mismatch_detail, idx,
2271 			       _("invalid shift amount"));
2272 	      return 0;
2273 	    }
2274 	  break;
2275 
2276 	case AARCH64_OPND_IMM_VLSL:
2277 	  size = aarch64_get_qualifier_esize (qualifier);
2278 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2279 	    {
2280 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2281 					  size * 8 - 1);
2282 	      return 0;
2283 	    }
2284 	  break;
2285 
2286 	case AARCH64_OPND_IMM_VLSR:
2287 	  size = aarch64_get_qualifier_esize (qualifier);
2288 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2289 	    {
2290 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2291 	      return 0;
2292 	    }
2293 	  break;
2294 
2295 	case AARCH64_OPND_SIMD_IMM:
2296 	case AARCH64_OPND_SIMD_IMM_SFT:
2297 	  /* Qualifier check.  */
2298 	  switch (qualifier)
2299 	    {
2300 	    case AARCH64_OPND_QLF_LSL:
2301 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
2302 		{
2303 		  set_other_error (mismatch_detail, idx,
2304 				   _("invalid shift operator"));
2305 		  return 0;
2306 		}
2307 	      break;
2308 	    case AARCH64_OPND_QLF_MSL:
2309 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
2310 		{
2311 		  set_other_error (mismatch_detail, idx,
2312 				   _("invalid shift operator"));
2313 		  return 0;
2314 		}
2315 	      break;
2316 	    case AARCH64_OPND_QLF_NIL:
2317 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2318 		{
2319 		  set_other_error (mismatch_detail, idx,
2320 				   _("shift is not permitted"));
2321 		  return 0;
2322 		}
2323 	      break;
2324 	    default:
2325 	      assert (0);
2326 	      return 0;
2327 	    }
2328 	  /* Is the immediate valid?  */
2329 	  assert (idx == 1);
2330 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2331 	    {
2332 	      /* uimm8 or simm8 */
2333 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
2334 		{
2335 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2336 		  return 0;
2337 		}
2338 	    }
2339 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2340 	    {
2341 	      /* uimm64 is not
2342 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2343 		 ffffffffgggggggghhhhhhhh'.  */
2344 	      set_other_error (mismatch_detail, idx,
2345 			       _("invalid value for immediate"));
2346 	      return 0;
2347 	    }
2348 	  /* Is the shift amount valid?  */
2349 	  switch (opnd->shifter.kind)
2350 	    {
2351 	    case AARCH64_MOD_LSL:
2352 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2353 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2354 		{
2355 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2356 						     (size - 1) * 8);
2357 		  return 0;
2358 		}
2359 	      if (!value_aligned_p (opnd->shifter.amount, 8))
2360 		{
2361 		  set_unaligned_error (mismatch_detail, idx, 8);
2362 		  return 0;
2363 		}
2364 	      break;
2365 	    case AARCH64_MOD_MSL:
2366 	      /* Only 8 and 16 are valid shift amount.  */
2367 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2368 		{
2369 		  set_other_error (mismatch_detail, idx,
2370 				   _("shift amount must be 0 or 16"));
2371 		  return 0;
2372 		}
2373 	      break;
2374 	    default:
2375 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2376 		{
2377 		  set_other_error (mismatch_detail, idx,
2378 				   _("invalid shift operator"));
2379 		  return 0;
2380 		}
2381 	      break;
2382 	    }
2383 	  break;
2384 
2385 	case AARCH64_OPND_FPIMM:
2386 	case AARCH64_OPND_SIMD_FPIMM:
2387 	case AARCH64_OPND_SVE_FPIMM8:
2388 	  if (opnd->imm.is_fp == 0)
2389 	    {
2390 	      set_other_error (mismatch_detail, idx,
2391 			       _("floating-point immediate expected"));
2392 	      return 0;
2393 	    }
2394 	  /* The value is expected to be an 8-bit floating-point constant with
2395 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
2396 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2397 	     instruction).  */
2398 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
2399 	    {
2400 	      set_other_error (mismatch_detail, idx,
2401 			       _("immediate out of range"));
2402 	      return 0;
2403 	    }
2404 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
2405 	    {
2406 	      set_other_error (mismatch_detail, idx,
2407 			       _("invalid shift operator"));
2408 	      return 0;
2409 	    }
2410 	  break;
2411 
2412 	case AARCH64_OPND_SVE_AIMM:
2413 	  min_value = 0;
2414 	sve_aimm:
2415 	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2416 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2417 	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2418 	  uvalue = opnd->imm.value;
2419 	  shift = opnd->shifter.amount;
2420 	  if (size == 1)
2421 	    {
2422 	      if (shift != 0)
2423 		{
2424 		  set_other_error (mismatch_detail, idx,
2425 				   _("no shift amount allowed for"
2426 				     " 8-bit constants"));
2427 		  return 0;
2428 		}
2429 	    }
2430 	  else
2431 	    {
2432 	      if (shift != 0 && shift != 8)
2433 		{
2434 		  set_other_error (mismatch_detail, idx,
2435 				   _("shift amount must be 0 or 8"));
2436 		  return 0;
2437 		}
2438 	      if (shift == 0 && (uvalue & 0xff) == 0)
2439 		{
2440 		  shift = 8;
2441 		  uvalue = (int64_t) uvalue / 256;
2442 		}
2443 	    }
2444 	  mask >>= shift;
2445 	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2446 	    {
2447 	      set_other_error (mismatch_detail, idx,
2448 			       _("immediate too big for element size"));
2449 	      return 0;
2450 	    }
2451 	  uvalue = (uvalue - min_value) & mask;
2452 	  if (uvalue > 0xff)
2453 	    {
2454 	      set_other_error (mismatch_detail, idx,
2455 			       _("invalid arithmetic immediate"));
2456 	      return 0;
2457 	    }
2458 	  break;
2459 
2460 	case AARCH64_OPND_SVE_ASIMM:
2461 	  min_value = -128;
2462 	  goto sve_aimm;
2463 
2464 	case AARCH64_OPND_SVE_I1_HALF_ONE:
2465 	  assert (opnd->imm.is_fp);
2466 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2467 	    {
2468 	      set_other_error (mismatch_detail, idx,
2469 			       _("floating-point value must be 0.5 or 1.0"));
2470 	      return 0;
2471 	    }
2472 	  break;
2473 
2474 	case AARCH64_OPND_SVE_I1_HALF_TWO:
2475 	  assert (opnd->imm.is_fp);
2476 	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2477 	    {
2478 	      set_other_error (mismatch_detail, idx,
2479 			       _("floating-point value must be 0.5 or 2.0"));
2480 	      return 0;
2481 	    }
2482 	  break;
2483 
2484 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
2485 	  assert (opnd->imm.is_fp);
2486 	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2487 	    {
2488 	      set_other_error (mismatch_detail, idx,
2489 			       _("floating-point value must be 0.0 or 1.0"));
2490 	      return 0;
2491 	    }
2492 	  break;
2493 
2494 	case AARCH64_OPND_SVE_INV_LIMM:
2495 	  {
2496 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2497 	    uint64_t uimm = ~opnd->imm.value;
2498 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2499 	      {
2500 		set_other_error (mismatch_detail, idx,
2501 				 _("immediate out of range"));
2502 		return 0;
2503 	      }
2504 	  }
2505 	  break;
2506 
2507 	case AARCH64_OPND_SVE_LIMM_MOV:
2508 	  {
2509 	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2510 	    uint64_t uimm = opnd->imm.value;
2511 	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2512 	      {
2513 		set_other_error (mismatch_detail, idx,
2514 				 _("immediate out of range"));
2515 		return 0;
2516 	      }
2517 	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2518 	      {
2519 		set_other_error (mismatch_detail, idx,
2520 				 _("invalid replicated MOV immediate"));
2521 		return 0;
2522 	      }
2523 	  }
2524 	  break;
2525 
2526 	case AARCH64_OPND_SVE_PATTERN_SCALED:
2527 	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2528 	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2529 	    {
2530 	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2531 	      return 0;
2532 	    }
2533 	  break;
2534 
2535 	case AARCH64_OPND_SVE_SHLIMM_PRED:
2536 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2537 	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2538 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2539 	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2540 	    {
2541 	      set_imm_out_of_range_error (mismatch_detail, idx,
2542 					  0, 8 * size - 1);
2543 	      return 0;
2544 	    }
2545 	  break;
2546 
2547 	case AARCH64_OPND_SVE_SHRIMM_PRED:
2548 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2549 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2550 	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2551 	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2552 	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2553 	    {
2554 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2555 	      return 0;
2556 	    }
2557 	  break;
2558 
2559 	default:
2560 	  break;
2561 	}
2562       break;
2563 
2564     case AARCH64_OPND_CLASS_SYSTEM:
2565       switch (type)
2566 	{
2567 	case AARCH64_OPND_PSTATEFIELD:
2568 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2569 	  /* MSR UAO, #uimm4
2570 	     MSR PAN, #uimm4
2571 	     MSR SSBS,#uimm4
2572 	     The immediate must be #0 or #1.  */
2573 	  if ((opnd->pstatefield == 0x03	/* UAO.  */
2574 	       || opnd->pstatefield == 0x04	/* PAN.  */
2575 	       || opnd->pstatefield == 0x19     /* SSBS.  */
2576 	       || opnd->pstatefield == 0x1a)	/* DIT.  */
2577 	      && opnds[1].imm.value > 1)
2578 	    {
2579 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2580 	      return 0;
2581 	    }
2582 	  /* MSR SPSel, #uimm4
2583 	     Uses uimm4 as a control value to select the stack pointer: if
2584 	     bit 0 is set it selects the current exception level's stack
2585 	     pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2586 	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
2587 	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2588 	    {
2589 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2590 	      return 0;
2591 	    }
2592 	  break;
2593 	default:
2594 	  break;
2595 	}
2596       break;
2597 
2598     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2599       /* Get the upper bound for the element index.  */
2600       if (opcode->op == OP_FCMLA_ELEM)
	/* FCMLA index range depends on the vector size of other operands
	   and is halved because complex numbers take two elements.  */
2603 	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2604 	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2605       else
2606 	num = 16;
2607       num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2608       assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2609 
2610       /* Index out-of-range.  */
2611       if (!value_in_range_p (opnd->reglane.index, 0, num))
2612 	{
2613 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2614 	  return 0;
2615 	}
2616       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2617 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
2618 	 number is encoded in "size:M:Rm":
2619 	 size	<Vm>
2620 	 00		RESERVED
2621 	 01		0:Rm
2622 	 10		M:Rm
2623 	 11		RESERVED  */
2624       if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2625 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
2626 	{
2627 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2628 	  return 0;
2629 	}
2630       break;
2631 
2632     case AARCH64_OPND_CLASS_MODIFIED_REG:
2633       assert (idx == 1 || idx == 2);
2634       switch (type)
2635 	{
2636 	case AARCH64_OPND_Rm_EXT:
2637 	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
2638 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
2639 	    {
2640 	      set_other_error (mismatch_detail, idx,
2641 			       _("extend operator expected"));
2642 	      return 0;
2643 	    }
2644 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2645 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
2646 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
2647 	     case.  */
2648 	  if (!aarch64_stack_pointer_p (opnds + 0)
2649 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2650 	    {
2651 	      if (!opnd->shifter.operator_present)
2652 		{
2653 		  set_other_error (mismatch_detail, idx,
2654 				   _("missing extend operator"));
2655 		  return 0;
2656 		}
2657 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2658 		{
2659 		  set_other_error (mismatch_detail, idx,
2660 				   _("'LSL' operator not allowed"));
2661 		  return 0;
2662 		}
2663 	    }
2664 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
2665 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
2666 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2667 	    {
2668 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2669 	      return 0;
2670 	    }
2671 	  /* In the 64-bit form, the final register operand is written as Wm
2672 	     for all but the (possibly omitted) UXTX/LSL and SXTX
2673 	     operators.
2674 	     N.B. GAS allows X register to be used with any operator as a
2675 	     programming convenience.  */
2676 	  if (qualifier == AARCH64_OPND_QLF_X
2677 	      && opnd->shifter.kind != AARCH64_MOD_LSL
2678 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
2679 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
2680 	    {
2681 	      set_other_error (mismatch_detail, idx, _("W register expected"));
2682 	      return 0;
2683 	    }
2684 	  break;
2685 
2686 	case AARCH64_OPND_Rm_SFT:
2687 	  /* ROR is not available to the shifted register operand in
2688 	     arithmetic instructions.  */
2689 	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
2690 	    {
2691 	      set_other_error (mismatch_detail, idx,
2692 			       _("shift operator expected"));
2693 	      return 0;
2694 	    }
2695 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
2696 	      && opcode->iclass != log_shift)
2697 	    {
2698 	      set_other_error (mismatch_detail, idx,
2699 			       _("'ROR' operator not allowed"));
2700 	      return 0;
2701 	    }
2702 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2703 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
2704 	    {
2705 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2706 	      return 0;
2707 	    }
2708 	  break;
2709 
2710 	default:
2711 	  break;
2712 	}
2713       break;
2714 
2715     default:
2716       break;
2717     }
2718 
2719   return 1;
2720 }
2721 
2722 /* Main entrypoint for the operand constraint checking.
2723 
2724    Return 1 if operands of *INST meet the constraint applied by the operand
2725    codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2726    not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2727    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2728    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2729    error kind when it is notified that an instruction does not pass the check).
2730 
2731    Un-determined operand qualifiers may get established during the process.  */
2732 
2733 int
2734 aarch64_match_operands_constraint (aarch64_inst *inst,
2735 				   aarch64_operand_error *mismatch_detail)
2736 {
2737   int i;
2738 
2739   DEBUG_TRACE ("enter");
2740 
2741   /* Check for cases where a source register needs to be the same as the
2742      destination register.  Do this before matching qualifiers since if
2743      an instruction has both invalid tying and invalid qualifiers,
2744      the error about qualifiers would suggest several alternative
2745      instructions that also have invalid tying.  */
2746   i = inst->opcode->tied_operand;
2747   if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2748     {
2749       if (mismatch_detail)
2750 	{
2751 	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2752 	  mismatch_detail->index = i;
2753 	  mismatch_detail->error = NULL;
2754 	}
2755       return 0;
2756     }
2757 
2758   /* Match operands' qualifier.
2759      *INST has already had qualifier establish for some, if not all, of
2760      its operands; we need to find out whether these established
2761      qualifiers match one of the qualifier sequence in
2762      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2763      with the corresponding qualifier in such a sequence.
2764      Only basic operand constraint checking is done here; the more thorough
2765      constraint checking will carried out by operand_general_constraint_met_p,
2766      which has be to called after this in order to get all of the operands'
2767      qualifiers established.  */
2768   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2769     {
2770       DEBUG_TRACE ("FAIL on operand qualifier matching");
2771       if (mismatch_detail)
2772 	{
2773 	  /* Return an error type to indicate that it is the qualifier
2774 	     matching failure; we don't care about which operand as there
2775 	     are enough information in the opcode table to reproduce it.  */
2776 	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2777 	  mismatch_detail->index = -1;
2778 	  mismatch_detail->error = NULL;
2779 	}
2780       return 0;
2781     }
2782 
2783   /* Match operands' constraint.  */
2784   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2785     {
2786       enum aarch64_opnd type = inst->opcode->operands[i];
2787       if (type == AARCH64_OPND_NIL)
2788 	break;
2789       if (inst->operands[i].skip)
2790 	{
2791 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2792 	  continue;
2793 	}
2794       if (operand_general_constraint_met_p (inst->operands, i, type,
2795 					    inst->opcode, mismatch_detail) == 0)
2796 	{
2797 	  DEBUG_TRACE ("FAIL on operand %d", i);
2798 	  return 0;
2799 	}
2800     }
2801 
2802   DEBUG_TRACE ("PASS");
2803 
2804   return 1;
2805 }
2806 
2807 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2808    Also updates the TYPE of each INST->OPERANDS with the corresponding
2809    value of OPCODE->OPERANDS.
2810 
2811    Note that some operand qualifiers may need to be manually cleared by
2812    the caller before it further calls the aarch64_opcode_encode; by
2813    doing this, it helps the qualifier matching facilities work
2814    properly.  */
2815 
2816 const aarch64_opcode*
2817 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2818 {
2819   int i;
2820   const aarch64_opcode *old = inst->opcode;
2821 
2822   inst->opcode = opcode;
2823 
2824   /* Update the operand types.  */
2825   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2826     {
2827       inst->operands[i].type = opcode->operands[i];
2828       if (opcode->operands[i] == AARCH64_OPND_NIL)
2829 	break;
2830     }
2831 
2832   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2833 
2834   return old;
2835 }
2836 
2837 int
2838 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2839 {
2840   int i;
2841   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2842     if (operands[i] == operand)
2843       return i;
2844     else if (operands[i] == AARCH64_OPND_NIL)
2845       break;
2846   return -1;
2847 }
2848 
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  /* Bank 0: register 31 names the stack pointer (wsp/sp).  */
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  /* Bank 1: register 31 names the zero register (wzr/xzr).  */
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2879 
2880 /* Return the integer register name.
2881    if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg.  */
2882 
2883 static inline const char *
2884 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2885 {
2886   const int has_zr = sp_reg_p ? 0 : 1;
2887   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2888   return int_reg[has_zr][is_64][regno];
2889 }
2890 
2891 /* Like get_int_reg_name, but IS_64 is always 1.  */
2892 
2893 static inline const char *
2894 get_64bit_int_reg_name (int regno, int sp_reg_p)
2895 {
2896   const int has_zr = sp_reg_p ? 0 : 1;
2897   return int_reg[has_zr][1][regno];
2898 }
2899 
2900 /* Get the name of the integer offset register in OPND, using the shift type
2901    to decide whether it's a word or doubleword.  */
2902 
2903 static inline const char *
2904 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2905 {
2906   switch (opnd->shifter.kind)
2907     {
2908     case AARCH64_MOD_UXTW:
2909     case AARCH64_MOD_SXTW:
2910       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2911 
2912     case AARCH64_MOD_LSL:
2913     case AARCH64_MOD_SXTX:
2914       return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2915 
2916     default:
2917       abort ();
2918     }
2919 }
2920 
2921 /* Get the name of the SVE vector offset register in OPND, using the operand
2922    qualifier to decide whether the suffix should be .S or .D.  */
2923 
2924 static inline const char *
2925 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2926 {
2927   assert (qualifier == AARCH64_OPND_QLF_S_S
2928 	  || qualifier == AARCH64_OPND_QLF_S_D);
2929   return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2930 }
2931 
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Reinterpret a 64-bit pattern as a double for printing.  */
typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

/* Reinterpret a 32-bit pattern as a float for printing.  */
typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision before
   printing (see expand_fp_imm), hence the 32-bit storage here.  */
typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;
2951 
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  Expand IMM8 to the IEEE 754
   bit pattern of a single-precision value (SIZE == 4) or a double-precision
   value (SIZE == 8).  A half-precision value (SIZE == 2) is expanded to a
   single-precision value.  Return the expanded bit pattern.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  const uint32_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t exp_top = frac >> 6;		/* imm8<6>   */
  /* Replicate(imm8<6>,4).  */
  const uint32_t exp_rep4 = exp_top ? 0xf : 0x0;

  if (size == 8)
    {
      /* sign : NOT(imm8<6>) : seven copies of imm8<6>
	 : imm8<6>:imm8<5:0> : Zeros(48).  */
      result = ((uint64_t) sign << 63)
	| ((uint64_t) (exp_top ^ 1) << 62)
	| ((uint64_t) exp_rep4 << 58)
	| ((uint64_t) exp_top << 57)
	| ((uint64_t) exp_top << 56)
	| ((uint64_t) exp_top << 55)
	| ((uint64_t) frac << 48);
    }
  else if (size == 4 || size == 2)
    {
      /* sign : NOT(imm8<6>) : Replicate(imm8<6>,4)
	 : imm8<6>:imm8<5:0> : Zeros(19).  */
      result = ((uint64_t) sign << 31)
	| ((uint64_t) (exp_top ^ 1) << 30)
	| ((uint64_t) exp_rep4 << 26)
	| ((uint64_t) frac << 19);
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
2995 
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32, hence the & 0x1f.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* A vector element list (LEt) must carry an index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Otherwise spell out each register; only the first NUM_REGS of
	 these are consumed by the matching format string below.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
3054 
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */

static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
        {
	  /* Pre-indexed writeback; a zero ADDR_SIMM10 immediate is
	     printed without the "#0", e.g. "[x0]!".  */
	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
            snprintf (buf, size, "[%s]!", base);
          else
	    snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
        }
      else
	/* Post-indexed writeback, e.g. "[x0], #8".  */
	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
    }
  else
    {
      if (opnd->shifter.operator_present)
	{
	  /* The only modifier valid here is the SVE vector-length
	     multiplier, printed as ", mul vl".  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, #%d, mul vl]",
		    base, opnd->addr.offset.imm);
	}
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
      else
	/* Omit a zero immediate offset entirely.  */
	snprintf (buf, size, "[%s]", base);
    }
}
3089 
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];			/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  /* The S_B qualifier with an explicit amount is the special case of an
     8-bit load/store, where a zero amount must still be printed.  */
  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Not print the shift/extend amount when the amount is zero and
         when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
		  (opnd->shifter.amount % 100))<;
      else
	snprintf (tb, sizeof (tb), ", %s", shift_name);
    }
  else
    /* No modifier at all: print just "[base, offset]".  */
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
}
3130 
3131 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3132    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
3133    PC, PCREL_P and ADDRESS are used to pass in and return information about
3134    the PC-relative address calculation, where the PC value is passed in
3135    PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3136    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3137    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3138 
3139    The function serves both the disassembler and the assembler diagnostics
3140    issuer, which is the reason why it lives in this file.  */
3141 
3142 void
3143 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3144 		       const aarch64_opcode *opcode,
3145 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3146 		       bfd_vma *address, char** notes,
3147 		       aarch64_feature_set features)
3148 {
3149   unsigned int i, num_conds;
3150   const char *name = NULL;
3151   const aarch64_opnd_info *opnd = opnds + idx;
3152   enum aarch64_modifier_kind kind;
3153   uint64_t addr, enum_value;
3154 
3155   buf[0] = '\0';
3156   if (pcrel_p)
3157     *pcrel_p = 0;
3158 
3159   switch (opnd->type)
3160     {
3161     case AARCH64_OPND_Rd:
3162     case AARCH64_OPND_Rn:
3163     case AARCH64_OPND_Rm:
3164     case AARCH64_OPND_Rt:
3165     case AARCH64_OPND_Rt2:
3166     case AARCH64_OPND_Rs:
3167     case AARCH64_OPND_Ra:
3168     case AARCH64_OPND_Rt_SYS:
3169     case AARCH64_OPND_PAIRREG:
3170     case AARCH64_OPND_SVE_Rm:
3171       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3172 	 the <ic_op>, therefore we use opnd->present to override the
3173 	 generic optional-ness information.  */
3174       if (opnd->type == AARCH64_OPND_Rt_SYS)
3175 	{
3176 	  if (!opnd->present)
3177 	    break;
3178 	}
3179       /* Omit the operand, e.g. RET.  */
3180       else if (optional_operand_p (opcode, idx)
3181 	       && (opnd->reg.regno
3182 		   == get_optional_operand_default_value (opcode)))
3183 	break;
3184       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3185 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3186       snprintf (buf, size, "%s",
3187 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3188       break;
3189 
3190     case AARCH64_OPND_Rd_SP:
3191     case AARCH64_OPND_Rn_SP:
3192     case AARCH64_OPND_Rt_SP:
3193     case AARCH64_OPND_SVE_Rn_SP:
3194     case AARCH64_OPND_Rm_SP:
3195       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3196 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
3197 	      || opnd->qualifier == AARCH64_OPND_QLF_X
3198 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
3199       snprintf (buf, size, "%s",
3200 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3201       break;
3202 
3203     case AARCH64_OPND_Rm_EXT:
3204       kind = opnd->shifter.kind;
3205       assert (idx == 1 || idx == 2);
3206       if ((aarch64_stack_pointer_p (opnds)
3207 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3208 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
3209 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
3210 	       && kind == AARCH64_MOD_UXTW)
3211 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
3212 		  && kind == AARCH64_MOD_UXTX)))
3213 	{
3214 	  /* 'LSL' is the preferred form in this case.  */
3215 	  kind = AARCH64_MOD_LSL;
3216 	  if (opnd->shifter.amount == 0)
3217 	    {
3218 	      /* Shifter omitted.  */
3219 	      snprintf (buf, size, "%s",
3220 			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3221 	      break;
3222 	    }
3223 	}
3224       if (opnd->shifter.amount)
3225 	snprintf (buf, size, "%s, %s #%" PRIi64,
3226 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3227 		  aarch64_operand_modifiers[kind].name,
3228 		  opnd->shifter.amount);
3229       else
3230 	snprintf (buf, size, "%s, %s",
3231 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3232 		  aarch64_operand_modifiers[kind].name);
3233       break;
3234 
3235     case AARCH64_OPND_Rm_SFT:
3236       assert (opnd->qualifier == AARCH64_OPND_QLF_W
3237 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3238       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3239 	snprintf (buf, size, "%s",
3240 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3241       else
3242 	snprintf (buf, size, "%s, %s #%" PRIi64,
3243 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3244 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3245 		  opnd->shifter.amount);
3246       break;
3247 
3248     case AARCH64_OPND_Fd:
3249     case AARCH64_OPND_Fn:
3250     case AARCH64_OPND_Fm:
3251     case AARCH64_OPND_Fa:
3252     case AARCH64_OPND_Ft:
3253     case AARCH64_OPND_Ft2:
3254     case AARCH64_OPND_Sd:
3255     case AARCH64_OPND_Sn:
3256     case AARCH64_OPND_Sm:
3257     case AARCH64_OPND_SVE_VZn:
3258     case AARCH64_OPND_SVE_Vd:
3259     case AARCH64_OPND_SVE_Vm:
3260     case AARCH64_OPND_SVE_Vn:
3261       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3262 		opnd->reg.regno);
3263       break;
3264 
3265     case AARCH64_OPND_Va:
3266     case AARCH64_OPND_Vd:
3267     case AARCH64_OPND_Vn:
3268     case AARCH64_OPND_Vm:
3269       snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3270 		aarch64_get_qualifier_name (opnd->qualifier));
3271       break;
3272 
3273     case AARCH64_OPND_Ed:
3274     case AARCH64_OPND_En:
3275     case AARCH64_OPND_Em:
3276     case AARCH64_OPND_Em16:
3277     case AARCH64_OPND_SM3_IMM2:
3278       snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3279 		aarch64_get_qualifier_name (opnd->qualifier),
3280 		opnd->reglane.index);
3281       break;
3282 
3283     case AARCH64_OPND_VdD1:
3284     case AARCH64_OPND_VnD1:
3285       snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3286       break;
3287 
3288     case AARCH64_OPND_LVn:
3289     case AARCH64_OPND_LVt:
3290     case AARCH64_OPND_LVt_AL:
3291     case AARCH64_OPND_LEt:
3292       print_register_list (buf, size, opnd, "v");
3293       break;
3294 
3295     case AARCH64_OPND_SVE_Pd:
3296     case AARCH64_OPND_SVE_Pg3:
3297     case AARCH64_OPND_SVE_Pg4_5:
3298     case AARCH64_OPND_SVE_Pg4_10:
3299     case AARCH64_OPND_SVE_Pg4_16:
3300     case AARCH64_OPND_SVE_Pm:
3301     case AARCH64_OPND_SVE_Pn:
3302     case AARCH64_OPND_SVE_Pt:
3303       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3304 	snprintf (buf, size, "p%d", opnd->reg.regno);
3305       else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3306 	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3307 	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3308 		  aarch64_get_qualifier_name (opnd->qualifier));
3309       else
3310 	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3311 		  aarch64_get_qualifier_name (opnd->qualifier));
3312       break;
3313 
3314     case AARCH64_OPND_SVE_Za_5:
3315     case AARCH64_OPND_SVE_Za_16:
3316     case AARCH64_OPND_SVE_Zd:
3317     case AARCH64_OPND_SVE_Zm_5:
3318     case AARCH64_OPND_SVE_Zm_16:
3319     case AARCH64_OPND_SVE_Zn:
3320     case AARCH64_OPND_SVE_Zt:
3321       if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3322 	snprintf (buf, size, "z%d", opnd->reg.regno);
3323       else
3324 	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3325 		  aarch64_get_qualifier_name (opnd->qualifier));
3326       break;
3327 
3328     case AARCH64_OPND_SVE_ZnxN:
3329     case AARCH64_OPND_SVE_ZtxN:
3330       print_register_list (buf, size, opnd, "z");
3331       break;
3332 
3333     case AARCH64_OPND_SVE_Zm3_INDEX:
3334     case AARCH64_OPND_SVE_Zm3_22_INDEX:
3335     case AARCH64_OPND_SVE_Zm3_11_INDEX:
3336     case AARCH64_OPND_SVE_Zm4_11_INDEX:
3337     case AARCH64_OPND_SVE_Zm4_INDEX:
3338     case AARCH64_OPND_SVE_Zn_INDEX:
3339       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3340 		aarch64_get_qualifier_name (opnd->qualifier),
3341 		opnd->reglane.index);
3342       break;
3343 
3344     case AARCH64_OPND_CRn:
3345     case AARCH64_OPND_CRm:
3346       snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3347       break;
3348 
3349     case AARCH64_OPND_IDX:
3350     case AARCH64_OPND_MASK:
3351     case AARCH64_OPND_IMM:
3352     case AARCH64_OPND_IMM_2:
3353     case AARCH64_OPND_WIDTH:
3354     case AARCH64_OPND_UIMM3_OP1:
3355     case AARCH64_OPND_UIMM3_OP2:
3356     case AARCH64_OPND_BIT_NUM:
3357     case AARCH64_OPND_IMM_VLSL:
3358     case AARCH64_OPND_IMM_VLSR:
3359     case AARCH64_OPND_SHLL_IMM:
3360     case AARCH64_OPND_IMM0:
3361     case AARCH64_OPND_IMMR:
3362     case AARCH64_OPND_IMMS:
3363     case AARCH64_OPND_UNDEFINED:
3364     case AARCH64_OPND_FBITS:
3365     case AARCH64_OPND_TME_UIMM16:
3366     case AARCH64_OPND_SIMM5:
3367     case AARCH64_OPND_SVE_SHLIMM_PRED:
3368     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3369     case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3370     case AARCH64_OPND_SVE_SHRIMM_PRED:
3371     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3372     case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3373     case AARCH64_OPND_SVE_SIMM5:
3374     case AARCH64_OPND_SVE_SIMM5B:
3375     case AARCH64_OPND_SVE_SIMM6:
3376     case AARCH64_OPND_SVE_SIMM8:
3377     case AARCH64_OPND_SVE_UIMM3:
3378     case AARCH64_OPND_SVE_UIMM7:
3379     case AARCH64_OPND_SVE_UIMM8:
3380     case AARCH64_OPND_SVE_UIMM8_53:
3381     case AARCH64_OPND_IMM_ROT1:
3382     case AARCH64_OPND_IMM_ROT2:
3383     case AARCH64_OPND_IMM_ROT3:
3384     case AARCH64_OPND_SVE_IMM_ROT1:
3385     case AARCH64_OPND_SVE_IMM_ROT2:
3386     case AARCH64_OPND_SVE_IMM_ROT3:
3387       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3388       break;
3389 
3390     case AARCH64_OPND_SVE_I1_HALF_ONE:
3391     case AARCH64_OPND_SVE_I1_HALF_TWO:
3392     case AARCH64_OPND_SVE_I1_ZERO_ONE:
3393       {
3394 	single_conv_t c;
3395 	c.i = opnd->imm.value;
3396 	snprintf (buf, size, "#%.1f", c.f);
3397 	break;
3398       }
3399 
3400     case AARCH64_OPND_SVE_PATTERN:
3401       if (optional_operand_p (opcode, idx)
3402 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3403 	break;
3404       enum_value = opnd->imm.value;
3405       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3406       if (aarch64_sve_pattern_array[enum_value])
3407 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3408       else
3409 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3410       break;
3411 
3412     case AARCH64_OPND_SVE_PATTERN_SCALED:
3413       if (optional_operand_p (opcode, idx)
3414 	  && !opnd->shifter.operator_present
3415 	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3416 	break;
3417       enum_value = opnd->imm.value;
3418       assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3419       if (aarch64_sve_pattern_array[opnd->imm.value])
3420 	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3421       else
3422 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3423       if (opnd->shifter.operator_present)
3424 	{
3425 	  size_t len = strlen (buf);
3426 	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
3427 		    aarch64_operand_modifiers[opnd->shifter.kind].name,
3428 		    opnd->shifter.amount);
3429 	}
3430       break;
3431 
3432     case AARCH64_OPND_SVE_PRFOP:
3433       enum_value = opnd->imm.value;
3434       assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3435       if (aarch64_sve_prfop_array[enum_value])
3436 	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3437       else
3438 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3439       break;
3440 
3441     case AARCH64_OPND_IMM_MOV:
3442       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3443 	{
3444 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
3445 	    {
3446 	      int imm32 = opnd->imm.value;
3447 	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3448 	    }
3449 	  break;
3450 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
3451 	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3452 		    opnd->imm.value, opnd->imm.value);
3453 	  break;
3454 	default: assert (0);
3455 	}
3456       break;
3457 
3458     case AARCH64_OPND_FPIMM0:
3459       snprintf (buf, size, "#0.0");
3460       break;
3461 
3462     case AARCH64_OPND_LIMM:
3463     case AARCH64_OPND_AIMM:
3464     case AARCH64_OPND_HALF:
3465     case AARCH64_OPND_SVE_INV_LIMM:
3466     case AARCH64_OPND_SVE_LIMM:
3467     case AARCH64_OPND_SVE_LIMM_MOV:
3468       if (opnd->shifter.amount)
3469 	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3470 		  opnd->shifter.amount);
3471       else
3472 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3473       break;
3474 
3475     case AARCH64_OPND_SIMD_IMM:
3476     case AARCH64_OPND_SIMD_IMM_SFT:
3477       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3478 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
3479 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3480       else
3481 	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3482 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3483 		  opnd->shifter.amount);
3484       break;
3485 
3486     case AARCH64_OPND_SVE_AIMM:
3487     case AARCH64_OPND_SVE_ASIMM:
3488       if (opnd->shifter.amount)
3489 	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3490 		  opnd->shifter.amount);
3491       else
3492 	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3493       break;
3494 
3495     case AARCH64_OPND_FPIMM:
3496     case AARCH64_OPND_SIMD_FPIMM:
3497     case AARCH64_OPND_SVE_FPIMM8:
3498       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3499 	{
3500 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
3501 	    {
3502 	      half_conv_t c;
3503 	      c.i = expand_fp_imm (2, opnd->imm.value);
3504 	      snprintf (buf, size,  "#%.18e", c.f);
3505 	    }
3506 	  break;
3507 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
3508 	    {
3509 	      single_conv_t c;
3510 	      c.i = expand_fp_imm (4, opnd->imm.value);
3511 	      snprintf (buf, size,  "#%.18e", c.f);
3512 	    }
3513 	  break;
3514 	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
3515 	    {
3516 	      double_conv_t c;
3517 	      c.i = expand_fp_imm (8, opnd->imm.value);
3518 	      snprintf (buf, size,  "#%.18e", c.d);
3519 	    }
3520 	  break;
3521 	default: assert (0);
3522 	}
3523       break;
3524 
3525     case AARCH64_OPND_CCMP_IMM:
3526     case AARCH64_OPND_NZCV:
3527     case AARCH64_OPND_EXCEPTION:
3528     case AARCH64_OPND_UIMM4:
3529     case AARCH64_OPND_UIMM4_ADDG:
3530     case AARCH64_OPND_UIMM7:
3531     case AARCH64_OPND_UIMM10:
3532       if (optional_operand_p (opcode, idx) == TRUE
3533 	  && (opnd->imm.value ==
3534 	      (int64_t) get_optional_operand_default_value (opcode)))
3535 	/* Omit the operand, e.g. DCPS1.  */
3536 	break;
3537       snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3538       break;
3539 
3540     case AARCH64_OPND_COND:
3541     case AARCH64_OPND_COND1:
3542       snprintf (buf, size, "%s", opnd->cond->names[0]);
3543       num_conds = ARRAY_SIZE (opnd->cond->names);
3544       for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3545 	{
3546 	  size_t len = strlen (buf);
3547 	  if (i == 1)
3548 	    snprintf (buf + len, size - len, "  // %s = %s",
3549 		      opnd->cond->names[0], opnd->cond->names[i]);
3550 	  else
3551 	    snprintf (buf + len, size - len, ", %s",
3552 		      opnd->cond->names[i]);
3553 	}
3554       break;
3555 
3556     case AARCH64_OPND_ADDR_ADRP:
3557       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3558 	+ opnd->imm.value;
3559       if (pcrel_p)
3560 	*pcrel_p = 1;
3561       if (address)
3562 	*address = addr;
3563       /* This is not necessary during the disassembling, as print_address_func
3564 	 in the disassemble_info will take care of the printing.  But some
3565 	 other callers may be still interested in getting the string in *STR,
3566 	 so here we do snprintf regardless.  */
3567       snprintf (buf, size, "#0x%" PRIx64, addr);
3568       break;
3569 
3570     case AARCH64_OPND_ADDR_PCREL14:
3571     case AARCH64_OPND_ADDR_PCREL19:
3572     case AARCH64_OPND_ADDR_PCREL21:
3573     case AARCH64_OPND_ADDR_PCREL26:
3574       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3575       if (pcrel_p)
3576 	*pcrel_p = 1;
3577       if (address)
3578 	*address = addr;
3579       /* This is not necessary during the disassembling, as print_address_func
3580 	 in the disassemble_info will take care of the printing.  But some
3581 	 other callers may be still interested in getting the string in *STR,
3582 	 so here we do snprintf regardless.  */
3583       snprintf (buf, size, "#0x%" PRIx64, addr);
3584       break;
3585 
3586     case AARCH64_OPND_ADDR_SIMPLE:
3587     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3588     case AARCH64_OPND_SIMD_ADDR_POST:
3589       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3590       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3591 	{
3592 	  if (opnd->addr.offset.is_reg)
3593 	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3594 	  else
3595 	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3596 	}
3597       else
3598 	snprintf (buf, size, "[%s]", name);
3599       break;
3600 
3601     case AARCH64_OPND_ADDR_REGOFF:
3602     case AARCH64_OPND_SVE_ADDR_R:
3603     case AARCH64_OPND_SVE_ADDR_RR:
3604     case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3605     case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3606     case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3607     case AARCH64_OPND_SVE_ADDR_RX:
3608     case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3609     case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3610     case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3611       print_register_offset_address
3612 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3613 	 get_offset_int_reg_name (opnd));
3614       break;
3615 
3616     case AARCH64_OPND_SVE_ADDR_ZX:
3617       print_register_offset_address
3618 	(buf, size, opnd,
3619 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3620 	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3621       break;
3622 
3623     case AARCH64_OPND_SVE_ADDR_RZ:
3624     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3625     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3626     case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3627     case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3628     case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3629     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3630     case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3631     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3632     case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3633     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3634     case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3635       print_register_offset_address
3636 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3637 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3638       break;
3639 
3640     case AARCH64_OPND_ADDR_SIMM7:
3641     case AARCH64_OPND_ADDR_SIMM9:
3642     case AARCH64_OPND_ADDR_SIMM9_2:
3643     case AARCH64_OPND_ADDR_SIMM10:
3644     case AARCH64_OPND_ADDR_SIMM11:
3645     case AARCH64_OPND_ADDR_SIMM13:
3646     case AARCH64_OPND_ADDR_OFFSET:
3647     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3648     case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3649     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3650     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3651     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3652     case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3653     case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3654     case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3655     case AARCH64_OPND_SVE_ADDR_RI_U6:
3656     case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3657     case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3658     case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3659       print_immediate_offset_address
3660 	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3661       break;
3662 
3663     case AARCH64_OPND_SVE_ADDR_ZI_U5:
3664     case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3665     case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3666     case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3667       print_immediate_offset_address
3668 	(buf, size, opnd,
3669 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3670       break;
3671 
3672     case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3673     case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3674     case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3675       print_register_offset_address
3676 	(buf, size, opnd,
3677 	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3678 	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3679       break;
3680 
3681     case AARCH64_OPND_ADDR_UIMM12:
3682       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3683       if (opnd->addr.offset.imm)
3684 	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3685       else
3686 	snprintf (buf, size, "[%s]", name);
3687       break;
3688 
3689     case AARCH64_OPND_SYSREG:
3690       for (i = 0; aarch64_sys_regs[i].name; ++i)
3691 	{
3692 	  const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3693 
3694 	  bfd_boolean exact_match
3695 	    = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3696 	    || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3697 	    && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3698 
	  /* Try to find an exact match, but if that fails, return the first
	     partial match that was found.  */
3701 	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
3702 	      && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3703 	      && (name == NULL || exact_match))
3704 	    {
3705 	      name = aarch64_sys_regs[i].name;
3706 	      if (exact_match)
3707 		{
3708 		  if (notes)
3709 		    *notes = NULL;
3710 		  break;
3711 		}
3712 
	      /* If we didn't match exactly, that means the presence of a flag
		 indicates what we didn't want for this instruction.  e.g. if
		 F_REG_READ is there, that means we were looking for a write
		 register.  See aarch64_ext_sysreg.  */
3717 	      if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3718 		*notes = _("reading from a write-only register");
3719 	      else if (aarch64_sys_regs[i].flags & F_REG_READ)
3720 		*notes = _("writing to a read-only register");
3721 	    }
3722 	}
3723 
3724       if (name)
3725 	snprintf (buf, size, "%s", name);
3726       else
3727 	{
3728 	  /* Implementation defined system register.  */
3729 	  unsigned int value = opnd->sysreg.value;
3730 	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3731 		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3732 		    value & 0x7);
3733 	}
3734       break;
3735 
3736     case AARCH64_OPND_PSTATEFIELD:
3737       for (i = 0; aarch64_pstatefields[i].name; ++i)
3738 	if (aarch64_pstatefields[i].value == opnd->pstatefield)
3739 	  break;
3740       assert (aarch64_pstatefields[i].name);
3741       snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3742       break;
3743 
3744     case AARCH64_OPND_SYSREG_AT:
3745     case AARCH64_OPND_SYSREG_DC:
3746     case AARCH64_OPND_SYSREG_IC:
3747     case AARCH64_OPND_SYSREG_TLBI:
3748     case AARCH64_OPND_SYSREG_SR:
3749       snprintf (buf, size, "%s", opnd->sysins_op->name);
3750       break;
3751 
3752     case AARCH64_OPND_BARRIER:
3753       snprintf (buf, size, "%s", opnd->barrier->name);
3754       break;
3755 
3756     case AARCH64_OPND_BARRIER_ISB:
3757       /* Operand can be omitted, e.g. in DCPS1.  */
3758       if (! optional_operand_p (opcode, idx)
3759 	  || (opnd->barrier->value
3760 	      != get_optional_operand_default_value (opcode)))
3761 	snprintf (buf, size, "#0x%x", opnd->barrier->value);
3762       break;
3763 
3764     case AARCH64_OPND_PRFOP:
3765       if (opnd->prfop->name != NULL)
3766 	snprintf (buf, size, "%s", opnd->prfop->name);
3767       else
3768 	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3769       break;
3770 
3771     case AARCH64_OPND_BARRIER_PSB:
3772       snprintf (buf, size, "csync");
3773       break;
3774 
3775     case AARCH64_OPND_BTI_TARGET:
3776       if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3777 	snprintf (buf, size, "%s", opnd->hint_option->name);
3778       break;
3779 
3780     default:
3781       assert (0);
3782     }
3783 }
3784 
/* Pack a system register encoding from its (op0, op1, CRn, CRm, op2)
   fields.  After the final right shift by 5, the value holds op0 at
   bits [15:14], op1 at [13:11], CRn at [10:7], CRm at [6:3] and op2
   at [2:0] -- the same layout decoded by the "s%u_%u_c%u_c%u_%u"
   fallback printing of implementation-defined registers above.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Convenience names for the CRn/CRm field values used in the tables
   below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Build one aarch64_sys_reg table entry.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

/* An entry available on all cores: no feature requirement.  */
#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

/* An entry gated on a single architecture feature; F_ARCHEXT marks it
   as an architecture extension register.  */
#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

/* An entry gated on a pair of architecture features.  */
#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_RNG(n,e,f)	 SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
3824 
/* Shorthand entry builders, one per single architecture feature; each
   expands to SR_FEAT with the corresponding AARCH64_FEATURE_* bit.  */
#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
3840 
/* Expand entry-builder macro F once for each register index 1..15 at
   exception level X, i.e. F (X, 1), F (X, 2), ... F (X, 15).  Each
   expansion ends with a comma, so the result can sit directly inside
   an initializer list.  */
#define SR_EXPAND_ELx(f,x) \
  f (x, 1),  \
  f (x, 2),  \
  f (x, 3),  \
  f (x, 4),  \
  f (x, 5),  \
  f (x, 6),  \
  f (x, 7),  \
  f (x, 8),  \
  f (x, 9),  \
  f (x, 10), \
  f (x, 11), \
  f (x, 12), \
  f (x, 13), \
  f (x, 14), \
  f (x, 15),

/* Expand F for the EL1 and EL2 variants of registers 1..15.  */
#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
3861 
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
3867 const aarch64_sys_reg aarch64_sys_regs [] =
3868 {
3869   SR_CORE ("spsr_el1",		CPEN_ (0,C0,0),		0), /* = spsr_svc.  */
3870   SR_V8_1 ("spsr_el12",		CPEN_ (5,C0,0),		0),
3871   SR_CORE ("elr_el1",		CPEN_ (0,C0,1),		0),
3872   SR_V8_1 ("elr_el12",		CPEN_ (5,C0,1),		0),
3873   SR_CORE ("sp_el0",		CPEN_ (0,C1,0),		0),
3874   SR_CORE ("spsel",		CPEN_ (0,C2,0),		0),
3875   SR_CORE ("daif",		CPEN_ (3,C2,1),		0),
3876   SR_CORE ("currentel",		CPEN_ (0,C2,2),		F_REG_READ),
3877   SR_PAN  ("pan",		CPEN_ (0,C2,3),		0),
3878   SR_V8_2 ("uao",		CPEN_ (0,C2,4),		0),
3879   SR_CORE ("nzcv",		CPEN_ (3,C2,0),		0),
3880   SR_SSBS ("ssbs",		CPEN_ (3,C2,6),		0),
3881   SR_CORE ("fpcr",		CPEN_ (3,C4,0),		0),
3882   SR_CORE ("fpsr",		CPEN_ (3,C4,1),		0),
3883   SR_CORE ("dspsr_el0",		CPEN_ (3,C5,0),		0),
3884   SR_CORE ("dlr_el0",		CPEN_ (3,C5,1),		0),
3885   SR_CORE ("spsr_el2",		CPEN_ (4,C0,0),		0), /* = spsr_hyp.  */
3886   SR_CORE ("elr_el2",		CPEN_ (4,C0,1),		0),
3887   SR_CORE ("sp_el1",		CPEN_ (4,C1,0),		0),
3888   SR_CORE ("spsr_irq",		CPEN_ (4,C3,0),		0),
3889   SR_CORE ("spsr_abt",		CPEN_ (4,C3,1),		0),
3890   SR_CORE ("spsr_und",		CPEN_ (4,C3,2),		0),
3891   SR_CORE ("spsr_fiq",		CPEN_ (4,C3,3),		0),
3892   SR_CORE ("spsr_el3",		CPEN_ (6,C0,0),		0),
3893   SR_CORE ("elr_el3",		CPEN_ (6,C0,1),		0),
3894   SR_CORE ("sp_el2",		CPEN_ (6,C1,0),		0),
3895   SR_CORE ("spsr_svc",		CPEN_ (0,C0,0),		F_DEPRECATED), /* = spsr_el1.  */
3896   SR_CORE ("spsr_hyp",		CPEN_ (4,C0,0),		F_DEPRECATED), /* = spsr_el2.  */
3897   SR_CORE ("midr_el1",		CPENC (3,0,C0,C0,0),	F_REG_READ),
3898   SR_CORE ("ctr_el0",		CPENC (3,3,C0,C0,1),	F_REG_READ),
3899   SR_CORE ("mpidr_el1",		CPENC (3,0,C0,C0,5),	F_REG_READ),
3900   SR_CORE ("revidr_el1",	CPENC (3,0,C0,C0,6),	F_REG_READ),
3901   SR_CORE ("aidr_el1",		CPENC (3,1,C0,C0,7),	F_REG_READ),
3902   SR_CORE ("dczid_el0",		CPENC (3,3,C0,C0,7),	F_REG_READ),
3903   SR_CORE ("id_dfr0_el1",	CPENC (3,0,C0,C1,2),	F_REG_READ),
3904   SR_CORE ("id_pfr0_el1",	CPENC (3,0,C0,C1,0),	F_REG_READ),
3905   SR_CORE ("id_pfr1_el1",	CPENC (3,0,C0,C1,1),	F_REG_READ),
3906   SR_ID_PFR2 ("id_pfr2_el1",	CPENC (3,0,C0,C3,4),	F_REG_READ),
3907   SR_CORE ("id_afr0_el1",	CPENC (3,0,C0,C1,3),	F_REG_READ),
3908   SR_CORE ("id_mmfr0_el1",	CPENC (3,0,C0,C1,4),	F_REG_READ),
3909   SR_CORE ("id_mmfr1_el1",	CPENC (3,0,C0,C1,5),	F_REG_READ),
3910   SR_CORE ("id_mmfr2_el1",	CPENC (3,0,C0,C1,6),	F_REG_READ),
3911   SR_CORE ("id_mmfr3_el1",	CPENC (3,0,C0,C1,7),	F_REG_READ),
3912   SR_CORE ("id_mmfr4_el1",	CPENC (3,0,C0,C2,6),	F_REG_READ),
3913   SR_CORE ("id_isar0_el1",	CPENC (3,0,C0,C2,0),	F_REG_READ),
3914   SR_CORE ("id_isar1_el1",	CPENC (3,0,C0,C2,1),	F_REG_READ),
3915   SR_CORE ("id_isar2_el1",	CPENC (3,0,C0,C2,2),	F_REG_READ),
3916   SR_CORE ("id_isar3_el1",	CPENC (3,0,C0,C2,3),	F_REG_READ),
3917   SR_CORE ("id_isar4_el1",	CPENC (3,0,C0,C2,4),	F_REG_READ),
3918   SR_CORE ("id_isar5_el1",	CPENC (3,0,C0,C2,5),	F_REG_READ),
3919   SR_CORE ("mvfr0_el1",		CPENC (3,0,C0,C3,0),	F_REG_READ),
3920   SR_CORE ("mvfr1_el1",		CPENC (3,0,C0,C3,1),	F_REG_READ),
3921   SR_CORE ("mvfr2_el1",		CPENC (3,0,C0,C3,2),	F_REG_READ),
3922   SR_CORE ("ccsidr_el1",	CPENC (3,1,C0,C0,0),	F_REG_READ),
3923   SR_CORE ("id_aa64pfr0_el1",	CPENC (3,0,C0,C4,0),	F_REG_READ),
3924   SR_CORE ("id_aa64pfr1_el1",	CPENC (3,0,C0,C4,1),	F_REG_READ),
3925   SR_CORE ("id_aa64dfr0_el1",	CPENC (3,0,C0,C5,0),	F_REG_READ),
3926   SR_CORE ("id_aa64dfr1_el1",	CPENC (3,0,C0,C5,1),	F_REG_READ),
3927   SR_CORE ("id_aa64isar0_el1",	CPENC (3,0,C0,C6,0),	F_REG_READ),
3928   SR_CORE ("id_aa64isar1_el1",	CPENC (3,0,C0,C6,1),	F_REG_READ),
3929   SR_CORE ("id_aa64mmfr0_el1",	CPENC (3,0,C0,C7,0),	F_REG_READ),
3930   SR_CORE ("id_aa64mmfr1_el1",	CPENC (3,0,C0,C7,1),	F_REG_READ),
3931   SR_V8_2 ("id_aa64mmfr2_el1",	CPENC (3,0,C0,C7,2),	F_REG_READ),
3932   SR_CORE ("id_aa64afr0_el1",	CPENC (3,0,C0,C5,4),	F_REG_READ),
3933   SR_CORE ("id_aa64afr1_el1",	CPENC (3,0,C0,C5,5),	F_REG_READ),
3934   SR_SVE  ("id_aa64zfr0_el1",	CPENC (3,0,C0,C4,4),	F_REG_READ),
3935   SR_CORE ("clidr_el1",		CPENC (3,1,C0,C0,1),	F_REG_READ),
3936   SR_CORE ("csselr_el1",	CPENC (3,2,C0,C0,0),	0),
3937   SR_CORE ("vpidr_el2",		CPENC (3,4,C0,C0,0),	0),
3938   SR_CORE ("vmpidr_el2",	CPENC (3,4,C0,C0,5),	0),
3939   SR_CORE ("sctlr_el1",		CPENC (3,0,C1,C0,0),	0),
3940   SR_CORE ("sctlr_el2",		CPENC (3,4,C1,C0,0),	0),
3941   SR_CORE ("sctlr_el3",		CPENC (3,6,C1,C0,0),	0),
3942   SR_V8_1 ("sctlr_el12",	CPENC (3,5,C1,C0,0),	0),
3943   SR_CORE ("actlr_el1",		CPENC (3,0,C1,C0,1),	0),
3944   SR_CORE ("actlr_el2",		CPENC (3,4,C1,C0,1),	0),
3945   SR_CORE ("actlr_el3",		CPENC (3,6,C1,C0,1),	0),
3946   SR_CORE ("cpacr_el1",		CPENC (3,0,C1,C0,2),	0),
3947   SR_V8_1 ("cpacr_el12",	CPENC (3,5,C1,C0,2),	0),
3948   SR_CORE ("cptr_el2",		CPENC (3,4,C1,C1,2),	0),
3949   SR_CORE ("cptr_el3",		CPENC (3,6,C1,C1,2),	0),
3950   SR_CORE ("scr_el3",		CPENC (3,6,C1,C1,0),	0),
3951   SR_CORE ("hcr_el2",		CPENC (3,4,C1,C1,0),	0),
3952   SR_CORE ("mdcr_el2",		CPENC (3,4,C1,C1,1),	0),
3953   SR_CORE ("mdcr_el3",		CPENC (3,6,C1,C3,1),	0),
3954   SR_CORE ("hstr_el2",		CPENC (3,4,C1,C1,3),	0),
3955   SR_CORE ("hacr_el2",		CPENC (3,4,C1,C1,7),	0),
3956   SR_SVE  ("zcr_el1",		CPENC (3,0,C1,C2,0),	0),
3957   SR_SVE  ("zcr_el12",		CPENC (3,5,C1,C2,0),	0),
3958   SR_SVE  ("zcr_el2",		CPENC (3,4,C1,C2,0),	0),
3959   SR_SVE  ("zcr_el3",		CPENC (3,6,C1,C2,0),	0),
3960   SR_SVE  ("zidr_el1",		CPENC (3,0,C0,C0,7),	0),
3961   SR_CORE ("ttbr0_el1",		CPENC (3,0,C2,C0,0),	0),
3962   SR_CORE ("ttbr1_el1",		CPENC (3,0,C2,C0,1),	0),
3963   SR_V8_A ("ttbr0_el2",		CPENC (3,4,C2,C0,0),	0),
3964   SR_V8_1_A ("ttbr1_el2",	CPENC (3,4,C2,C0,1),	0),
3965   SR_CORE ("ttbr0_el3",		CPENC (3,6,C2,C0,0),	0),
3966   SR_V8_1 ("ttbr0_el12",	CPENC (3,5,C2,C0,0),	0),
3967   SR_V8_1 ("ttbr1_el12",	CPENC (3,5,C2,C0,1),	0),
3968   SR_V8_A ("vttbr_el2",		CPENC (3,4,C2,C1,0),	0),
3969   SR_CORE ("tcr_el1",		CPENC (3,0,C2,C0,2),	0),
3970   SR_CORE ("tcr_el2",		CPENC (3,4,C2,C0,2),	0),
3971   SR_CORE ("tcr_el3",		CPENC (3,6,C2,C0,2),	0),
3972   SR_V8_1 ("tcr_el12",		CPENC (3,5,C2,C0,2),	0),
3973   SR_CORE ("vtcr_el2",		CPENC (3,4,C2,C1,2),	0),
3974   SR_V8_3 ("apiakeylo_el1",	CPENC (3,0,C2,C1,0),	0),
3975   SR_V8_3 ("apiakeyhi_el1",	CPENC (3,0,C2,C1,1),	0),
3976   SR_V8_3 ("apibkeylo_el1",	CPENC (3,0,C2,C1,2),	0),
3977   SR_V8_3 ("apibkeyhi_el1",	CPENC (3,0,C2,C1,3),	0),
3978   SR_V8_3 ("apdakeylo_el1",	CPENC (3,0,C2,C2,0),	0),
3979   SR_V8_3 ("apdakeyhi_el1",	CPENC (3,0,C2,C2,1),	0),
3980   SR_V8_3 ("apdbkeylo_el1",	CPENC (3,0,C2,C2,2),	0),
3981   SR_V8_3 ("apdbkeyhi_el1",	CPENC (3,0,C2,C2,3),	0),
3982   SR_V8_3 ("apgakeylo_el1",	CPENC (3,0,C2,C3,0),	0),
3983   SR_V8_3 ("apgakeyhi_el1",	CPENC (3,0,C2,C3,1),	0),
3984   SR_CORE ("afsr0_el1",		CPENC (3,0,C5,C1,0),	0),
3985   SR_CORE ("afsr1_el1",		CPENC (3,0,C5,C1,1),	0),
3986   SR_CORE ("afsr0_el2",		CPENC (3,4,C5,C1,0),	0),
3987   SR_CORE ("afsr1_el2",		CPENC (3,4,C5,C1,1),	0),
3988   SR_CORE ("afsr0_el3",		CPENC (3,6,C5,C1,0),	0),
3989   SR_V8_1 ("afsr0_el12",	CPENC (3,5,C5,C1,0),	0),
3990   SR_CORE ("afsr1_el3",		CPENC (3,6,C5,C1,1),	0),
3991   SR_V8_1 ("afsr1_el12",	CPENC (3,5,C5,C1,1),	0),
3992   SR_CORE ("esr_el1",		CPENC (3,0,C5,C2,0),	0),
3993   SR_CORE ("esr_el2",		CPENC (3,4,C5,C2,0),	0),
3994   SR_CORE ("esr_el3",		CPENC (3,6,C5,C2,0),	0),
3995   SR_V8_1 ("esr_el12",		CPENC (3,5,C5,C2,0),	0),
3996   SR_RAS  ("vsesr_el2",		CPENC (3,4,C5,C2,3),	0),
3997   SR_CORE ("fpexc32_el2",	CPENC (3,4,C5,C3,0),	0),
3998   SR_RAS  ("erridr_el1",	CPENC (3,0,C5,C3,0),	F_REG_READ),
3999   SR_RAS  ("errselr_el1",	CPENC (3,0,C5,C3,1),	0),
4000   SR_RAS  ("erxfr_el1",		CPENC (3,0,C5,C4,0),	F_REG_READ),
4001   SR_RAS  ("erxctlr_el1",	CPENC (3,0,C5,C4,1),	0),
4002   SR_RAS  ("erxstatus_el1",	CPENC (3,0,C5,C4,2),	0),
4003   SR_RAS  ("erxaddr_el1",	CPENC (3,0,C5,C4,3),	0),
4004   SR_RAS  ("erxmisc0_el1",	CPENC (3,0,C5,C5,0),	0),
4005   SR_RAS  ("erxmisc1_el1",	CPENC (3,0,C5,C5,1),	0),
4006   SR_CORE ("far_el1",		CPENC (3,0,C6,C0,0),	0),
4007   SR_CORE ("far_el2",		CPENC (3,4,C6,C0,0),	0),
4008   SR_CORE ("far_el3",		CPENC (3,6,C6,C0,0),	0),
4009   SR_V8_1 ("far_el12",		CPENC (3,5,C6,C0,0),	0),
4010   SR_CORE ("hpfar_el2",		CPENC (3,4,C6,C0,4),	0),
4011   SR_CORE ("par_el1",		CPENC (3,0,C7,C4,0),	0),
4012   SR_CORE ("mair_el1",		CPENC (3,0,C10,C2,0),	0),
4013   SR_CORE ("mair_el2",		CPENC (3,4,C10,C2,0),	0),
4014   SR_CORE ("mair_el3",		CPENC (3,6,C10,C2,0),	0),
4015   SR_V8_1 ("mair_el12",		CPENC (3,5,C10,C2,0),	0),
4016   SR_CORE ("amair_el1",		CPENC (3,0,C10,C3,0),	0),
4017   SR_CORE ("amair_el2",		CPENC (3,4,C10,C3,0),	0),
4018   SR_CORE ("amair_el3",		CPENC (3,6,C10,C3,0),	0),
4019   SR_V8_1 ("amair_el12",	CPENC (3,5,C10,C3,0),	0),
4020   SR_CORE ("vbar_el1",		CPENC (3,0,C12,C0,0),	0),
4021   SR_CORE ("vbar_el2",		CPENC (3,4,C12,C0,0),	0),
4022   SR_CORE ("vbar_el3",		CPENC (3,6,C12,C0,0),	0),
4023   SR_V8_1 ("vbar_el12",		CPENC (3,5,C12,C0,0),	0),
4024   SR_CORE ("rvbar_el1",		CPENC (3,0,C12,C0,1),	F_REG_READ),
4025   SR_CORE ("rvbar_el2",		CPENC (3,4,C12,C0,1),	F_REG_READ),
4026   SR_CORE ("rvbar_el3",		CPENC (3,6,C12,C0,1),	F_REG_READ),
4027   SR_CORE ("rmr_el1",		CPENC (3,0,C12,C0,2),	0),
4028   SR_CORE ("rmr_el2",		CPENC (3,4,C12,C0,2),	0),
4029   SR_CORE ("rmr_el3",		CPENC (3,6,C12,C0,2),	0),
4030   SR_CORE ("isr_el1",		CPENC (3,0,C12,C1,0),	F_REG_READ),
4031   SR_RAS  ("disr_el1",		CPENC (3,0,C12,C1,1),	0),
4032   SR_RAS  ("vdisr_el2",		CPENC (3,4,C12,C1,1),	0),
4033   SR_CORE ("contextidr_el1",	CPENC (3,0,C13,C0,1),	0),
4034   SR_V8_1 ("contextidr_el2",	CPENC (3,4,C13,C0,1),	0),
4035   SR_V8_1 ("contextidr_el12",	CPENC (3,5,C13,C0,1),	0),
4036   SR_RNG  ("rndr",		CPENC (3,3,C2,C4,0),	F_REG_READ),
4037   SR_RNG  ("rndrrs",		CPENC (3,3,C2,C4,1),	F_REG_READ),
4038   SR_MEMTAG ("tco",		CPENC (3,3,C4,C2,7),	0),
4039   SR_MEMTAG ("tfsre0_el1",	CPENC (3,0,C5,C6,1),	0),
4040   SR_MEMTAG ("tfsr_el1",	CPENC (3,0,C5,C6,0),	0),
4041   SR_MEMTAG ("tfsr_el2",	CPENC (3,4,C5,C6,0),	0),
4042   SR_MEMTAG ("tfsr_el3",	CPENC (3,6,C5,C6,0),	0),
4043   SR_MEMTAG ("tfsr_el12",	CPENC (3,5,C5,C6,0),	0),
4044   SR_MEMTAG ("rgsr_el1",	CPENC (3,0,C1,C0,5),	0),
4045   SR_MEMTAG ("gcr_el1",		CPENC (3,0,C1,C0,6),	0),
4046   SR_MEMTAG ("gmid_el1",	CPENC (3,1,C0,C0,4),	F_REG_READ),
4047   SR_CORE ("tpidr_el0",		CPENC (3,3,C13,C0,2),	0),
4048   SR_CORE ("tpidrro_el0",       CPENC (3,3,C13,C0,3),	0),
4049   SR_CORE ("tpidr_el1",		CPENC (3,0,C13,C0,4),	0),
4050   SR_CORE ("tpidr_el2",		CPENC (3,4,C13,C0,2),	0),
4051   SR_CORE ("tpidr_el3",		CPENC (3,6,C13,C0,2),	0),
4052   SR_SCXTNUM ("scxtnum_el0",	CPENC (3,3,C13,C0,7),	0),
4053   SR_SCXTNUM ("scxtnum_el1",	CPENC (3,0,C13,C0,7),	0),
4054   SR_SCXTNUM ("scxtnum_el2",	CPENC (3,4,C13,C0,7),	0),
4055   SR_SCXTNUM ("scxtnum_el12",   CPENC (3,5,C13,C0,7),	0),
4056   SR_SCXTNUM ("scxtnum_el3",    CPENC (3,6,C13,C0,7),	0),
4057   SR_CORE ("teecr32_el1",       CPENC (2,2,C0, C0,0),	0), /* See section 3.9.7.1.  */
4058   SR_CORE ("cntfrq_el0",	CPENC (3,3,C14,C0,0),	0),
4059   SR_CORE ("cntpct_el0",	CPENC (3,3,C14,C0,1),	F_REG_READ),
4060   SR_CORE ("cntvct_el0",	CPENC (3,3,C14,C0,2),	F_REG_READ),
4061   SR_CORE ("cntvoff_el2",       CPENC (3,4,C14,C0,3),	0),
4062   SR_CORE ("cntkctl_el1",       CPENC (3,0,C14,C1,0),	0),
4063   SR_V8_1 ("cntkctl_el12",	CPENC (3,5,C14,C1,0),	0),
4064   SR_CORE ("cnthctl_el2",	CPENC (3,4,C14,C1,0),	0),
4065   SR_CORE ("cntp_tval_el0",	CPENC (3,3,C14,C2,0),	0),
4066   SR_V8_1 ("cntp_tval_el02",	CPENC (3,5,C14,C2,0),	0),
4067   SR_CORE ("cntp_ctl_el0",      CPENC (3,3,C14,C2,1),	0),
4068   SR_V8_1 ("cntp_ctl_el02",	CPENC (3,5,C14,C2,1),	0),
4069   SR_CORE ("cntp_cval_el0",     CPENC (3,3,C14,C2,2),	0),
4070   SR_V8_1 ("cntp_cval_el02",	CPENC (3,5,C14,C2,2),	0),
4071   SR_CORE ("cntv_tval_el0",     CPENC (3,3,C14,C3,0),	0),
4072   SR_V8_1 ("cntv_tval_el02",	CPENC (3,5,C14,C3,0),	0),
4073   SR_CORE ("cntv_ctl_el0",      CPENC (3,3,C14,C3,1),	0),
4074   SR_V8_1 ("cntv_ctl_el02",	CPENC (3,5,C14,C3,1),	0),
4075   SR_CORE ("cntv_cval_el0",     CPENC (3,3,C14,C3,2),	0),
4076   SR_V8_1 ("cntv_cval_el02",	CPENC (3,5,C14,C3,2),	0),
4077   SR_CORE ("cnthp_tval_el2",	CPENC (3,4,C14,C2,0),	0),
4078   SR_CORE ("cnthp_ctl_el2",	CPENC (3,4,C14,C2,1),	0),
4079   SR_CORE ("cnthp_cval_el2",	CPENC (3,4,C14,C2,2),	0),
4080   SR_CORE ("cntps_tval_el1",	CPENC (3,7,C14,C2,0),	0),
4081   SR_CORE ("cntps_ctl_el1",	CPENC (3,7,C14,C2,1),	0),
4082   SR_CORE ("cntps_cval_el1",	CPENC (3,7,C14,C2,2),	0),
4083   SR_V8_1 ("cnthv_tval_el2",	CPENC (3,4,C14,C3,0),	0),
4084   SR_V8_1 ("cnthv_ctl_el2",	CPENC (3,4,C14,C3,1),	0),
4085   SR_V8_1 ("cnthv_cval_el2",	CPENC (3,4,C14,C3,2),	0),
4086   SR_CORE ("dacr32_el2",	CPENC (3,4,C3,C0,0),	0),
4087   SR_CORE ("ifsr32_el2",	CPENC (3,4,C5,C0,1),	0),
4088   SR_CORE ("teehbr32_el1",	CPENC (2,2,C1,C0,0),	0),
4089   SR_CORE ("sder32_el3",	CPENC (3,6,C1,C1,1),	0),
4090   SR_CORE ("mdscr_el1",		CPENC (2,0,C0,C2,2),	0),
4091   SR_CORE ("mdccsr_el0",	CPENC (2,3,C0,C1,0),	F_REG_READ),
4092   SR_CORE ("mdccint_el1",       CPENC (2,0,C0,C2,0),	0),
4093   SR_CORE ("dbgdtr_el0",	CPENC (2,3,C0,C4,0),	0),
4094   SR_CORE ("dbgdtrrx_el0",	CPENC (2,3,C0,C5,0),	F_REG_READ),
4095   SR_CORE ("dbgdtrtx_el0",	CPENC (2,3,C0,C5,0),	F_REG_WRITE),
4096   SR_CORE ("osdtrrx_el1",	CPENC (2,0,C0,C0,2),	0),
4097   SR_CORE ("osdtrtx_el1",	CPENC (2,0,C0,C3,2),	0),
4098   SR_CORE ("oseccr_el1",	CPENC (2,0,C0,C6,2),	0),
4099   SR_CORE ("dbgvcr32_el2",      CPENC (2,4,C0,C7,0),	0),
4100   SR_CORE ("dbgbvr0_el1",       CPENC (2,0,C0,C0,4),	0),
4101   SR_CORE ("dbgbvr1_el1",       CPENC (2,0,C0,C1,4),	0),
4102   SR_CORE ("dbgbvr2_el1",       CPENC (2,0,C0,C2,4),	0),
4103   SR_CORE ("dbgbvr3_el1",       CPENC (2,0,C0,C3,4),	0),
4104   SR_CORE ("dbgbvr4_el1",       CPENC (2,0,C0,C4,4),	0),
4105   SR_CORE ("dbgbvr5_el1",       CPENC (2,0,C0,C5,4),	0),
4106   SR_CORE ("dbgbvr6_el1",       CPENC (2,0,C0,C6,4),	0),
4107   SR_CORE ("dbgbvr7_el1",       CPENC (2,0,C0,C7,4),	0),
4108   SR_CORE ("dbgbvr8_el1",       CPENC (2,0,C0,C8,4),	0),
4109   SR_CORE ("dbgbvr9_el1",       CPENC (2,0,C0,C9,4),	0),
4110   SR_CORE ("dbgbvr10_el1",      CPENC (2,0,C0,C10,4),	0),
4111   SR_CORE ("dbgbvr11_el1",      CPENC (2,0,C0,C11,4),	0),
4112   SR_CORE ("dbgbvr12_el1",      CPENC (2,0,C0,C12,4),	0),
4113   SR_CORE ("dbgbvr13_el1",      CPENC (2,0,C0,C13,4),	0),
4114   SR_CORE ("dbgbvr14_el1",      CPENC (2,0,C0,C14,4),	0),
4115   SR_CORE ("dbgbvr15_el1",      CPENC (2,0,C0,C15,4),	0),
4116   SR_CORE ("dbgbcr0_el1",       CPENC (2,0,C0,C0,5),	0),
4117   SR_CORE ("dbgbcr1_el1",       CPENC (2,0,C0,C1,5),	0),
4118   SR_CORE ("dbgbcr2_el1",       CPENC (2,0,C0,C2,5),	0),
4119   SR_CORE ("dbgbcr3_el1",       CPENC (2,0,C0,C3,5),	0),
4120   SR_CORE ("dbgbcr4_el1",       CPENC (2,0,C0,C4,5),	0),
4121   SR_CORE ("dbgbcr5_el1",       CPENC (2,0,C0,C5,5),	0),
4122   SR_CORE ("dbgbcr6_el1",       CPENC (2,0,C0,C6,5),	0),
4123   SR_CORE ("dbgbcr7_el1",       CPENC (2,0,C0,C7,5),	0),
4124   SR_CORE ("dbgbcr8_el1",       CPENC (2,0,C0,C8,5),	0),
4125   SR_CORE ("dbgbcr9_el1",       CPENC (2,0,C0,C9,5),	0),
4126   SR_CORE ("dbgbcr10_el1",      CPENC (2,0,C0,C10,5),	0),
4127   SR_CORE ("dbgbcr11_el1",      CPENC (2,0,C0,C11,5),	0),
4128   SR_CORE ("dbgbcr12_el1",      CPENC (2,0,C0,C12,5),	0),
4129   SR_CORE ("dbgbcr13_el1",      CPENC (2,0,C0,C13,5),	0),
4130   SR_CORE ("dbgbcr14_el1",      CPENC (2,0,C0,C14,5),	0),
4131   SR_CORE ("dbgbcr15_el1",      CPENC (2,0,C0,C15,5),	0),
4132   SR_CORE ("dbgwvr0_el1",       CPENC (2,0,C0,C0,6),	0),
4133   SR_CORE ("dbgwvr1_el1",       CPENC (2,0,C0,C1,6),	0),
4134   SR_CORE ("dbgwvr2_el1",       CPENC (2,0,C0,C2,6),	0),
4135   SR_CORE ("dbgwvr3_el1",       CPENC (2,0,C0,C3,6),	0),
4136   SR_CORE ("dbgwvr4_el1",       CPENC (2,0,C0,C4,6),	0),
4137   SR_CORE ("dbgwvr5_el1",       CPENC (2,0,C0,C5,6),	0),
4138   SR_CORE ("dbgwvr6_el1",       CPENC (2,0,C0,C6,6),	0),
4139   SR_CORE ("dbgwvr7_el1",       CPENC (2,0,C0,C7,6),	0),
4140   SR_CORE ("dbgwvr8_el1",       CPENC (2,0,C0,C8,6),	0),
4141   SR_CORE ("dbgwvr9_el1",       CPENC (2,0,C0,C9,6),	0),
4142   SR_CORE ("dbgwvr10_el1",      CPENC (2,0,C0,C10,6),	0),
4143   SR_CORE ("dbgwvr11_el1",      CPENC (2,0,C0,C11,6),	0),
4144   SR_CORE ("dbgwvr12_el1",      CPENC (2,0,C0,C12,6),	0),
4145   SR_CORE ("dbgwvr13_el1",      CPENC (2,0,C0,C13,6),	0),
4146   SR_CORE ("dbgwvr14_el1",      CPENC (2,0,C0,C14,6),	0),
4147   SR_CORE ("dbgwvr15_el1",      CPENC (2,0,C0,C15,6),	0),
4148   SR_CORE ("dbgwcr0_el1",       CPENC (2,0,C0,C0,7),	0),
4149   SR_CORE ("dbgwcr1_el1",       CPENC (2,0,C0,C1,7),	0),
4150   SR_CORE ("dbgwcr2_el1",       CPENC (2,0,C0,C2,7),	0),
4151   SR_CORE ("dbgwcr3_el1",       CPENC (2,0,C0,C3,7),	0),
4152   SR_CORE ("dbgwcr4_el1",       CPENC (2,0,C0,C4,7),	0),
4153   SR_CORE ("dbgwcr5_el1",       CPENC (2,0,C0,C5,7),	0),
4154   SR_CORE ("dbgwcr6_el1",       CPENC (2,0,C0,C6,7),	0),
4155   SR_CORE ("dbgwcr7_el1",       CPENC (2,0,C0,C7,7),	0),
4156   SR_CORE ("dbgwcr8_el1",       CPENC (2,0,C0,C8,7),	0),
4157   SR_CORE ("dbgwcr9_el1",       CPENC (2,0,C0,C9,7),	0),
4158   SR_CORE ("dbgwcr10_el1",      CPENC (2,0,C0,C10,7),	0),
4159   SR_CORE ("dbgwcr11_el1",      CPENC (2,0,C0,C11,7),	0),
4160   SR_CORE ("dbgwcr12_el1",      CPENC (2,0,C0,C12,7),	0),
4161   SR_CORE ("dbgwcr13_el1",      CPENC (2,0,C0,C13,7),	0),
4162   SR_CORE ("dbgwcr14_el1",      CPENC (2,0,C0,C14,7),	0),
4163   SR_CORE ("dbgwcr15_el1",      CPENC (2,0,C0,C15,7),	0),
4164   SR_CORE ("mdrar_el1",		CPENC (2,0,C1,C0,0),	F_REG_READ),
4165   SR_CORE ("oslar_el1",		CPENC (2,0,C1,C0,4),	F_REG_WRITE),
4166   SR_CORE ("oslsr_el1",		CPENC (2,0,C1,C1,4),	F_REG_READ),
4167   SR_CORE ("osdlr_el1",		CPENC (2,0,C1,C3,4),	0),
4168   SR_CORE ("dbgprcr_el1",       CPENC (2,0,C1,C4,4),	0),
4169   SR_CORE ("dbgclaimset_el1",   CPENC (2,0,C7,C8,6),	0),
4170   SR_CORE ("dbgclaimclr_el1",   CPENC (2,0,C7,C9,6),	0),
4171   SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6),	F_REG_READ),
4172   SR_PROFILE ("pmblimitr_el1",	CPENC (3,0,C9,C10,0),	0),
4173   SR_PROFILE ("pmbptr_el1",	CPENC (3,0,C9,C10,1),	0),
4174   SR_PROFILE ("pmbsr_el1",	CPENC (3,0,C9,C10,3),	0),
4175   SR_PROFILE ("pmbidr_el1",	CPENC (3,0,C9,C10,7),	F_REG_READ),
4176   SR_PROFILE ("pmscr_el1",	CPENC (3,0,C9,C9,0),	0),
4177   SR_PROFILE ("pmsicr_el1",	CPENC (3,0,C9,C9,2),	0),
4178   SR_PROFILE ("pmsirr_el1",	CPENC (3,0,C9,C9,3),	0),
4179   SR_PROFILE ("pmsfcr_el1",	CPENC (3,0,C9,C9,4),	0),
4180   SR_PROFILE ("pmsevfr_el1",	CPENC (3,0,C9,C9,5),	0),
4181   SR_PROFILE ("pmslatfr_el1",	CPENC (3,0,C9,C9,6),	0),
4182   SR_PROFILE ("pmsidr_el1",	CPENC (3,0,C9,C9,7),	0),
4183   SR_PROFILE ("pmscr_el2",	CPENC (3,4,C9,C9,0),	0),
4184   SR_PROFILE ("pmscr_el12",	CPENC (3,5,C9,C9,0),	0),
4185   SR_CORE ("pmcr_el0",		CPENC (3,3,C9,C12,0),	0),
4186   SR_CORE ("pmcntenset_el0",    CPENC (3,3,C9,C12,1),	0),
4187   SR_CORE ("pmcntenclr_el0",    CPENC (3,3,C9,C12,2),	0),
4188   SR_CORE ("pmovsclr_el0",      CPENC (3,3,C9,C12,3),	0),
4189   SR_CORE ("pmswinc_el0",       CPENC (3,3,C9,C12,4),	F_REG_WRITE),
4190   SR_CORE ("pmselr_el0",	CPENC (3,3,C9,C12,5),	0),
4191   SR_CORE ("pmceid0_el0",       CPENC (3,3,C9,C12,6),	F_REG_READ),
4192   SR_CORE ("pmceid1_el0",       CPENC (3,3,C9,C12,7),	F_REG_READ),
4193   SR_CORE ("pmccntr_el0",       CPENC (3,3,C9,C13,0),	0),
4194   SR_CORE ("pmxevtyper_el0",    CPENC (3,3,C9,C13,1),	0),
4195   SR_CORE ("pmxevcntr_el0",     CPENC (3,3,C9,C13,2),	0),
4196   SR_CORE ("pmuserenr_el0",     CPENC (3,3,C9,C14,0),	0),
4197   SR_CORE ("pmintenset_el1",    CPENC (3,0,C9,C14,1),	0),
4198   SR_CORE ("pmintenclr_el1",    CPENC (3,0,C9,C14,2),	0),
4199   SR_CORE ("pmovsset_el0",      CPENC (3,3,C9,C14,3),	0),
4200   SR_CORE ("pmevcntr0_el0",     CPENC (3,3,C14,C8,0),	0),
4201   SR_CORE ("pmevcntr1_el0",     CPENC (3,3,C14,C8,1),	0),
4202   SR_CORE ("pmevcntr2_el0",     CPENC (3,3,C14,C8,2),	0),
4203   SR_CORE ("pmevcntr3_el0",     CPENC (3,3,C14,C8,3),	0),
4204   SR_CORE ("pmevcntr4_el0",     CPENC (3,3,C14,C8,4),	0),
4205   SR_CORE ("pmevcntr5_el0",     CPENC (3,3,C14,C8,5),	0),
4206   SR_CORE ("pmevcntr6_el0",     CPENC (3,3,C14,C8,6),	0),
4207   SR_CORE ("pmevcntr7_el0",     CPENC (3,3,C14,C8,7),	0),
4208   SR_CORE ("pmevcntr8_el0",     CPENC (3,3,C14,C9,0),	0),
4209   SR_CORE ("pmevcntr9_el0",     CPENC (3,3,C14,C9,1),	0),
4210   SR_CORE ("pmevcntr10_el0",    CPENC (3,3,C14,C9,2),	0),
4211   SR_CORE ("pmevcntr11_el0",    CPENC (3,3,C14,C9,3),	0),
4212   SR_CORE ("pmevcntr12_el0",    CPENC (3,3,C14,C9,4),	0),
4213   SR_CORE ("pmevcntr13_el0",    CPENC (3,3,C14,C9,5),	0),
4214   SR_CORE ("pmevcntr14_el0",    CPENC (3,3,C14,C9,6),	0),
4215   SR_CORE ("pmevcntr15_el0",    CPENC (3,3,C14,C9,7),	0),
4216   SR_CORE ("pmevcntr16_el0",    CPENC (3,3,C14,C10,0),	0),
4217   SR_CORE ("pmevcntr17_el0",    CPENC (3,3,C14,C10,1),	0),
4218   SR_CORE ("pmevcntr18_el0",    CPENC (3,3,C14,C10,2),	0),
4219   SR_CORE ("pmevcntr19_el0",    CPENC (3,3,C14,C10,3),	0),
4220   SR_CORE ("pmevcntr20_el0",    CPENC (3,3,C14,C10,4),	0),
4221   SR_CORE ("pmevcntr21_el0",    CPENC (3,3,C14,C10,5),	0),
4222   SR_CORE ("pmevcntr22_el0",    CPENC (3,3,C14,C10,6),	0),
4223   SR_CORE ("pmevcntr23_el0",    CPENC (3,3,C14,C10,7),	0),
4224   SR_CORE ("pmevcntr24_el0",    CPENC (3,3,C14,C11,0),	0),
4225   SR_CORE ("pmevcntr25_el0",    CPENC (3,3,C14,C11,1),	0),
4226   SR_CORE ("pmevcntr26_el0",    CPENC (3,3,C14,C11,2),	0),
4227   SR_CORE ("pmevcntr27_el0",    CPENC (3,3,C14,C11,3),	0),
4228   SR_CORE ("pmevcntr28_el0",    CPENC (3,3,C14,C11,4),	0),
4229   SR_CORE ("pmevcntr29_el0",    CPENC (3,3,C14,C11,5),	0),
4230   SR_CORE ("pmevcntr30_el0",    CPENC (3,3,C14,C11,6),	0),
4231   SR_CORE ("pmevtyper0_el0",    CPENC (3,3,C14,C12,0),	0),
4232   SR_CORE ("pmevtyper1_el0",    CPENC (3,3,C14,C12,1),	0),
4233   SR_CORE ("pmevtyper2_el0",    CPENC (3,3,C14,C12,2),	0),
4234   SR_CORE ("pmevtyper3_el0",    CPENC (3,3,C14,C12,3),	0),
4235   SR_CORE ("pmevtyper4_el0",    CPENC (3,3,C14,C12,4),	0),
4236   SR_CORE ("pmevtyper5_el0",    CPENC (3,3,C14,C12,5),	0),
4237   SR_CORE ("pmevtyper6_el0",    CPENC (3,3,C14,C12,6),	0),
4238   SR_CORE ("pmevtyper7_el0",    CPENC (3,3,C14,C12,7),	0),
4239   SR_CORE ("pmevtyper8_el0",    CPENC (3,3,C14,C13,0),	0),
4240   SR_CORE ("pmevtyper9_el0",    CPENC (3,3,C14,C13,1),	0),
4241   SR_CORE ("pmevtyper10_el0",   CPENC (3,3,C14,C13,2),	0),
4242   SR_CORE ("pmevtyper11_el0",   CPENC (3,3,C14,C13,3),	0),
4243   SR_CORE ("pmevtyper12_el0",   CPENC (3,3,C14,C13,4),	0),
4244   SR_CORE ("pmevtyper13_el0",   CPENC (3,3,C14,C13,5),	0),
4245   SR_CORE ("pmevtyper14_el0",   CPENC (3,3,C14,C13,6),	0),
4246   SR_CORE ("pmevtyper15_el0",   CPENC (3,3,C14,C13,7),	0),
4247   SR_CORE ("pmevtyper16_el0",   CPENC (3,3,C14,C14,0),	0),
4248   SR_CORE ("pmevtyper17_el0",   CPENC (3,3,C14,C14,1),	0),
4249   SR_CORE ("pmevtyper18_el0",   CPENC (3,3,C14,C14,2),	0),
4250   SR_CORE ("pmevtyper19_el0",   CPENC (3,3,C14,C14,3),	0),
4251   SR_CORE ("pmevtyper20_el0",   CPENC (3,3,C14,C14,4),	0),
4252   SR_CORE ("pmevtyper21_el0",   CPENC (3,3,C14,C14,5),	0),
4253   SR_CORE ("pmevtyper22_el0",   CPENC (3,3,C14,C14,6),	0),
4254   SR_CORE ("pmevtyper23_el0",   CPENC (3,3,C14,C14,7),	0),
4255   SR_CORE ("pmevtyper24_el0",   CPENC (3,3,C14,C15,0),	0),
4256   SR_CORE ("pmevtyper25_el0",   CPENC (3,3,C14,C15,1),	0),
4257   SR_CORE ("pmevtyper26_el0",   CPENC (3,3,C14,C15,2),	0),
4258   SR_CORE ("pmevtyper27_el0",   CPENC (3,3,C14,C15,3),	0),
4259   SR_CORE ("pmevtyper28_el0",   CPENC (3,3,C14,C15,4),	0),
4260   SR_CORE ("pmevtyper29_el0",   CPENC (3,3,C14,C15,5),	0),
4261   SR_CORE ("pmevtyper30_el0",   CPENC (3,3,C14,C15,6),	0),
4262   SR_CORE ("pmccfiltr_el0",     CPENC (3,3,C14,C15,7),	0),
4263 
4264   SR_V8_4 ("dit",		CPEN_ (3,C2,5),		0),
4265   SR_V8_4 ("vstcr_el2",		CPENC (3,4,C2,C6,2),	0),
4266   SR_V8_4_A ("vsttbr_el2",	CPENC (3,4,C2,C6,0),	0),
4267   SR_V8_4 ("cnthvs_tval_el2",	CPENC (3,4,C14,C4,0),	0),
4268   SR_V8_4 ("cnthvs_cval_el2",	CPENC (3,4,C14,C4,2),	0),
4269   SR_V8_4 ("cnthvs_ctl_el2",	CPENC (3,4,C14,C4,1),	0),
4270   SR_V8_4 ("cnthps_tval_el2",	CPENC (3,4,C14,C5,0),	0),
4271   SR_V8_4 ("cnthps_cval_el2",	CPENC (3,4,C14,C5,2),	0),
4272   SR_V8_4 ("cnthps_ctl_el2",	CPENC (3,4,C14,C5,1),	0),
4273   SR_V8_4 ("sder32_el2",	CPENC (3,4,C1,C3,1),	0),
4274   SR_V8_4 ("vncr_el2",		CPENC (3,4,C2,C2,0),	0),
4275 
4276   SR_CORE ("mpam0_el1",		CPENC (3,0,C10,C5,1),	0),
4277   SR_CORE ("mpam1_el1",		CPENC (3,0,C10,C5,0),	0),
4278   SR_CORE ("mpam1_el12",	CPENC (3,5,C10,C5,0),	0),
4279   SR_CORE ("mpam2_el2",		CPENC (3,4,C10,C5,0),	0),
4280   SR_CORE ("mpam3_el3",		CPENC (3,6,C10,C5,0),	0),
4281   SR_CORE ("mpamhcr_el2",	CPENC (3,4,C10,C4,0),	0),
4282   SR_CORE ("mpamidr_el1",	CPENC (3,0,C10,C4,4),	F_REG_READ),
4283   SR_CORE ("mpamvpm0_el2",	CPENC (3,4,C10,C6,0),	0),
4284   SR_CORE ("mpamvpm1_el2",	CPENC (3,4,C10,C6,1),	0),
4285   SR_CORE ("mpamvpm2_el2",	CPENC (3,4,C10,C6,2),	0),
4286   SR_CORE ("mpamvpm3_el2",	CPENC (3,4,C10,C6,3),	0),
4287   SR_CORE ("mpamvpm4_el2",	CPENC (3,4,C10,C6,4),	0),
4288   SR_CORE ("mpamvpm5_el2",	CPENC (3,4,C10,C6,5),	0),
4289   SR_CORE ("mpamvpm6_el2",	CPENC (3,4,C10,C6,6),	0),
4290   SR_CORE ("mpamvpm7_el2",	CPENC (3,4,C10,C6,7),	0),
4291   SR_CORE ("mpamvpmv_el2",	CPENC (3,4,C10,C4,1),	0),
4292 
4293   SR_V8_R ("mpuir_el1",		CPENC (3,0,C0,C0,4),	F_REG_READ),
4294   SR_V8_R ("mpuir_el2",		CPENC (3,4,C0,C0,4),	F_REG_READ),
4295   SR_V8_R ("prbar_el1",		CPENC (3,0,C6,C8,0),	0),
4296   SR_V8_R ("prbar_el2",		CPENC (3,4,C6,C8,0),	0),
4297 
/* Encode a PRBARn/PRLARn register for the Armv8-R MPU region tables:
   X selects the exception level (1 or 2, giving op1 = 0 or 4), N is the
   region number (its upper bits select CRm 8..15, its low bit becomes
   bit 2 of op2), and LAR selects between the "prbar" (0) and "prlar"
   (1) encodings.  */
#define ENC_BARLAR(x,n,lar) \
  CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)

/* Expand to one prbar<n>_el<x> / prlar<n>_el<x> table entry.  */
#define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
#define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4303 
4304   SR_EXPAND_EL12 (PRBARn_ELx)
4305   SR_V8_R ("prenr_el1",		CPENC (3,0,C6,C1,1),	0),
4306   SR_V8_R ("prenr_el2",		CPENC (3,4,C6,C1,1),	0),
4307   SR_V8_R ("prlar_el1",		CPENC (3,0,C6,C8,1),	0),
4308   SR_V8_R ("prlar_el2",		CPENC (3,4,C6,C8,1),	0),
4309   SR_EXPAND_EL12 (PRLARn_ELx)
4310   SR_V8_R ("prselr_el1",	CPENC (3,0,C6,C2,1),	0),
4311   SR_V8_R ("prselr_el2",	CPENC (3,4,C6,C2,1),	0),
4312   SR_V8_R ("vsctlr_el2",	CPENC (3,4,C2,C0,0),	0),
4313 
4314   { 0, CPENC (0,0,0,0,0), 0, 0 }
4315 };
4316 
4317 bfd_boolean
4318 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4319 {
4320   return (reg_flags & F_DEPRECATED) != 0;
4321 }
4322 
/* The CPENC below is fairly misleading: the fields here are not in
   CPENC form.  They are in op2op1 form.  The fields are encoded by
   ins_pstatefield, which just shifts the value by the width of the
   fields in a loop.  So if you CPENC them, only the first value will be
   set and the rest are masked out to 0.  As an example, take op2 = 3
   and op1 = 2; CPENC would produce a value of 0b110000000001000000
   (0x30040) while what you want is 0b011010 (0x1a).  */
/* PSTATE field names accepted by MSR (immediate), with their packed
   op1/op2 encodings (see the comment above for why these values are
   not in CPENC form) and the feature gating supplied by the SR_*
   wrapper macros.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel",	  0x05,	0),
  SR_CORE ("daifset",	  0x1e,	0),
  SR_CORE ("daifclr",	  0x1f,	0),
  SR_PAN  ("pan",	  0x04, 0),
  SR_V8_2 ("uao",	  0x03, 0),
  SR_SSBS ("ssbs",	  0x19, 0),
  SR_V8_4 ("dit",	  0x1a,	0),
  SR_MEMTAG ("tco",	  0x1c,	0),
  { 0,	  CPENC (0,0,0,0,0), 0, 0 },	/* Sentinel: end of table.  */
};
4342 
4343 bfd_boolean
4344 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4345 				 const aarch64_sys_reg *reg)
4346 {
4347   if (!(reg->flags & F_ARCHEXT))
4348     return TRUE;
4349 
4350   return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4351 }
4352 
/* Operation names for the IC (instruction cache maintenance) form of
   the SYS instruction.  F_HASXT marks operations that take an Xt
   register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel: end of table.  */
};
4360 
/* Operation names for the DC (data cache maintenance) form of the SYS
   instruction.  F_HASXT marks operations taking an Xt register
   operand; F_ARCHEXT marks operations gated on an architecture
   extension, resolved by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel: end of table.  */
};
4393 
/* Operation names for the AT (address translation) form of the SYS
   instruction.  F_HASXT marks operations taking an Xt register
   operand; F_ARCHEXT marks operations gated on an architecture
   extension, resolved by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel: end of table.  */
};
4412 
/* Operation names for the TLBI (TLB maintenance) form of the SYS
   instruction.  F_HASXT marks operations taking an Xt register
   operand; F_ARCHEXT marks operations gated on an architecture
   extension, resolved by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* "*os" TLBI operations; these encodings are accepted only with
       ARMv8.4-A (see the whitelist in aarch64_sys_ins_reg_supported_p).  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range ("r*") TLBI operations, likewise accepted only with
       ARMv8.4-A per aarch64_sys_ins_reg_supported_p.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel: end of table.  */
};
4498 
/* Operand names for the prediction-restriction (cfp/dvp/cpp)
   instructions.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel: end of table.  */
};
4509 
4510 bfd_boolean
4511 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4512 {
4513   return (sys_ins_reg->flags & F_HASXT) != 0;
4514 }
4515 
/* Return TRUE if the system instruction operation REG_NAME, with
   encoding REG_VALUE, flags REG_FLAGS and table-supplied feature set
   REG_FEATURES, is available on a CPU with feature set FEATURES.
   Operations without F_ARCHEXT belong to the base architecture; the
   remaining checks whitelist individual encodings against the
   extension that introduced them.  */
extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
		 const char *reg_name,
                 aarch64_insn reg_value,
                 uint32_t reg_flags,
                 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return FALSE;
    }

  /* Operations not tied to an architecture extension are always
     supported.  */
  if (!(reg_flags & F_ARCHEXT))
    return TRUE;

  /* If the table entry carries an explicit feature set, honor it.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return TRUE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return TRUE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return TRUE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return TRUE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return TRUE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return TRUE;

  /* No matching extension check: not supported.  */
  return FALSE;
}
4633 
4634 #undef C0
4635 #undef C1
4636 #undef C2
4637 #undef C3
4638 #undef C4
4639 #undef C5
4640 #undef C6
4641 #undef C7
4642 #undef C8
4643 #undef C9
4644 #undef C10
4645 #undef C11
4646 #undef C12
4647 #undef C13
4648 #undef C14
4649 #undef C15
4650 
/* BIT extracts bit number BT of instruction word INSN; BITS extracts
   the inclusive bit field [HI:LO] of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4653 
4654 static enum err_type
4655 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4656 	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4657 	      bfd_boolean encoding ATTRIBUTE_UNUSED,
4658 	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4659 	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4660 {
4661   int t  = BITS (insn, 4, 0);
4662   int n  = BITS (insn, 9, 5);
4663   int t2 = BITS (insn, 14, 10);
4664 
4665   if (BIT (insn, 23))
4666     {
4667       /* Write back enabled.  */
4668       if ((t == n || t2 == n) && n != 31)
4669 	return ERR_UND;
4670     }
4671 
4672   if (BIT (insn, 22))
4673     {
4674       /* Load */
4675       if (t == t2)
4676 	return ERR_UND;
4677     }
4678 
4679   return ERR_OK;
4680 }
4681 
4682 /* Verifier for vector by element 3 operands functions where the
4683    conditions `if sz:L == 11 then UNDEFINED` holds.  */
4684 
4685 static enum err_type
4686 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4687 		bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4688 		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4689 		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4690 {
4691   const aarch64_insn undef_pattern = 0x3;
4692   aarch64_insn value;
4693 
4694   assert (inst->opcode);
4695   assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4696   value = encoding ? inst->value : insn;
4697   assert (value);
4698 
4699   if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4700     return ERR_UND;
4701 
4702   return ERR_OK;
4703 }
4704 
4705 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4706    If INST is NULL the given insn_sequence is cleared and the sequence is left
4707    uninitialized.  */
4708 
4709 void
4710 init_insn_sequence (const struct aarch64_inst *inst,
4711 		    aarch64_instr_sequence *insn_sequence)
4712 {
4713   int num_req_entries = 0;
4714   insn_sequence->next_insn = 0;
4715   insn_sequence->num_insns = num_req_entries;
4716   if (insn_sequence->instr)
4717     XDELETE (insn_sequence->instr);
4718   insn_sequence->instr = NULL;
4719 
4720   if (inst)
4721     {
4722       insn_sequence->instr = XNEW (aarch64_inst);
4723       memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4724     }
4725 
4726   /* Handle all the cases here.  May need to think of something smarter than
4727      a giant if/else chain if this grows.  At that time, a lookup table may be
4728      best.  */
4729   if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4730     num_req_entries = 1;
4731 
4732   if (insn_sequence->current_insns)
4733     XDELETEVEC (insn_sequence->current_insns);
4734   insn_sequence->current_insns = NULL;
4735 
4736   if (num_req_entries != 0)
4737     {
4738       size_t size = num_req_entries * sizeof (aarch64_inst);
4739       insn_sequence->current_insns
4740 	= (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4741       memset (insn_sequence->current_insns, 0, size);
4742     }
4743 }
4744 
4745 
4746 /*  This function verifies that the instruction INST adheres to its specified
4747     constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
4748     returned and MISMATCH_DETAIL contains the reason why verification failed.
4749 
4750     The function is called both during assembly and disassembly.  If assembling
4751     then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
4752     and will contain the PC of the current instruction w.r.t to the section.
4753 
4754     If ENCODING and PC=0 then you are at a start of a section.  The constraints
4755     are verified against the given state insn_sequence which is updated as it
4756     transitions through the verification.  */
4757 
4758 enum err_type
4759 verify_constraints (const struct aarch64_inst *inst,
4760 		    const aarch64_insn insn ATTRIBUTE_UNUSED,
4761 		    bfd_vma pc,
4762 		    bfd_boolean encoding,
4763 		    aarch64_operand_error *mismatch_detail,
4764 		    aarch64_instr_sequence *insn_sequence)
4765 {
4766   assert (inst);
4767   assert (inst->opcode);
4768 
4769   const struct aarch64_opcode *opcode = inst->opcode;
4770   if (!opcode->constraints && !insn_sequence->instr)
4771     return ERR_OK;
4772 
4773   assert (insn_sequence);
4774 
4775   enum err_type res = ERR_OK;
4776 
4777   /* This instruction puts a constraint on the insn_sequence.  */
4778   if (opcode->flags & F_SCAN)
4779     {
4780       if (insn_sequence->instr)
4781 	{
4782 	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4783 	  mismatch_detail->error = _("instruction opens new dependency "
4784 				     "sequence without ending previous one");
4785 	  mismatch_detail->index = -1;
4786 	  mismatch_detail->non_fatal = TRUE;
4787 	  res = ERR_VFI;
4788 	}
4789 
4790       init_insn_sequence (inst, insn_sequence);
4791       return res;
4792     }
4793 
4794   /* Verify constraints on an existing sequence.  */
4795   if (insn_sequence->instr)
4796     {
4797       const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4798       /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4799 	 closed a previous one that we should have.  */
4800       if (!encoding && pc == 0)
4801 	{
4802 	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4803 	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
4804 	  mismatch_detail->index = -1;
4805 	  mismatch_detail->non_fatal = TRUE;
4806 	  res = ERR_VFI;
4807 	  /* Reset the sequence.  */
4808 	  init_insn_sequence (NULL, insn_sequence);
4809 	  return res;
4810 	}
4811 
4812       /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
4813       if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4814 	{
4815 	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4816 	     instruction for better error messages.  */
4817 	  if (!opcode->avariant
4818 	      || !(*opcode->avariant &
4819 		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4820 	    {
4821 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4822 	      mismatch_detail->error = _("SVE instruction expected after "
4823 					 "`movprfx'");
4824 	      mismatch_detail->index = -1;
4825 	      mismatch_detail->non_fatal = TRUE;
4826 	      res = ERR_VFI;
4827 	      goto done;
4828 	    }
4829 
4830 	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4831 	     instruction that is allowed to be used with a MOVPRFX.  */
4832 	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
4833 	    {
4834 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4835 	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4836 					 "expected");
4837 	      mismatch_detail->index = -1;
4838 	      mismatch_detail->non_fatal = TRUE;
4839 	      res = ERR_VFI;
4840 	      goto done;
4841 	    }
4842 
4843 	  /* Next check for usage of the predicate register.  */
4844 	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4845 	  aarch64_opnd_info blk_pred, inst_pred;
4846 	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4847 	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4848 	  bfd_boolean predicated = FALSE;
4849 	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4850 
4851 	  /* Determine if the movprfx instruction used is predicated or not.  */
4852 	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4853 	    {
4854 	      predicated = TRUE;
4855 	      blk_pred = insn_sequence->instr->operands[1];
4856 	    }
4857 
4858 	  unsigned char max_elem_size = 0;
4859 	  unsigned char current_elem_size;
4860 	  int num_op_used = 0, last_op_usage = 0;
4861 	  int i, inst_pred_idx = -1;
4862 	  int num_ops = aarch64_num_of_operands (opcode);
4863 	  for (i = 0; i < num_ops; i++)
4864 	    {
4865 	      aarch64_opnd_info inst_op = inst->operands[i];
4866 	      switch (inst_op.type)
4867 		{
4868 		  case AARCH64_OPND_SVE_Zd:
4869 		  case AARCH64_OPND_SVE_Zm_5:
4870 		  case AARCH64_OPND_SVE_Zm_16:
4871 		  case AARCH64_OPND_SVE_Zn:
4872 		  case AARCH64_OPND_SVE_Zt:
4873 		  case AARCH64_OPND_SVE_Vm:
4874 		  case AARCH64_OPND_SVE_Vn:
4875 		  case AARCH64_OPND_Va:
4876 		  case AARCH64_OPND_Vn:
4877 		  case AARCH64_OPND_Vm:
4878 		  case AARCH64_OPND_Sn:
4879 		  case AARCH64_OPND_Sm:
4880 		    if (inst_op.reg.regno == blk_dest.reg.regno)
4881 		      {
4882 			num_op_used++;
4883 			last_op_usage = i;
4884 		      }
4885 		    current_elem_size
4886 		      = aarch64_get_qualifier_esize (inst_op.qualifier);
4887 		    if (current_elem_size > max_elem_size)
4888 		      max_elem_size = current_elem_size;
4889 		    break;
4890 		  case AARCH64_OPND_SVE_Pd:
4891 		  case AARCH64_OPND_SVE_Pg3:
4892 		  case AARCH64_OPND_SVE_Pg4_5:
4893 		  case AARCH64_OPND_SVE_Pg4_10:
4894 		  case AARCH64_OPND_SVE_Pg4_16:
4895 		  case AARCH64_OPND_SVE_Pm:
4896 		  case AARCH64_OPND_SVE_Pn:
4897 		  case AARCH64_OPND_SVE_Pt:
4898 		    inst_pred = inst_op;
4899 		    inst_pred_idx = i;
4900 		    break;
4901 		  default:
4902 		    break;
4903 		}
4904 	    }
4905 
4906 	   assert (max_elem_size != 0);
4907 	   aarch64_opnd_info inst_dest = inst->operands[0];
4908 	   /* Determine the size that should be used to compare against the
4909 	      movprfx size.  */
4910 	   current_elem_size
4911 	     = opcode->constraints & C_MAX_ELEM
4912 	       ? max_elem_size
4913 	       : aarch64_get_qualifier_esize (inst_dest.qualifier);
4914 
4915 	  /* If movprfx is predicated do some extra checks.  */
4916 	  if (predicated)
4917 	    {
4918 	      /* The instruction must be predicated.  */
4919 	      if (inst_pred_idx < 0)
4920 		{
4921 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4922 		  mismatch_detail->error = _("predicated instruction expected "
4923 					     "after `movprfx'");
4924 		  mismatch_detail->index = -1;
4925 		  mismatch_detail->non_fatal = TRUE;
4926 		  res = ERR_VFI;
4927 		  goto done;
4928 		}
4929 
4930 	      /* The instruction must have a merging predicate.  */
4931 	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4932 		{
4933 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4934 		  mismatch_detail->error = _("merging predicate expected due "
4935 					     "to preceding `movprfx'");
4936 		  mismatch_detail->index = inst_pred_idx;
4937 		  mismatch_detail->non_fatal = TRUE;
4938 		  res = ERR_VFI;
4939 		  goto done;
4940 		}
4941 
4942 	      /* The same register must be used in instruction.  */
4943 	      if (blk_pred.reg.regno != inst_pred.reg.regno)
4944 		{
4945 		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4946 		  mismatch_detail->error = _("predicate register differs "
4947 					     "from that in preceding "
4948 					     "`movprfx'");
4949 		  mismatch_detail->index = inst_pred_idx;
4950 		  mismatch_detail->non_fatal = TRUE;
4951 		  res = ERR_VFI;
4952 		  goto done;
4953 		}
4954 	    }
4955 
4956 	  /* Destructive operations by definition must allow one usage of the
4957 	     same register.  */
4958 	  int allowed_usage
4959 	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
4960 
4961 	  /* Operand is not used at all.  */
4962 	  if (num_op_used == 0)
4963 	    {
4964 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4965 	      mismatch_detail->error = _("output register of preceding "
4966 					 "`movprfx' not used in current "
4967 					 "instruction");
4968 	      mismatch_detail->index = 0;
4969 	      mismatch_detail->non_fatal = TRUE;
4970 	      res = ERR_VFI;
4971 	      goto done;
4972 	    }
4973 
4974 	  /* We now know it's used, now determine exactly where it's used.  */
4975 	  if (blk_dest.reg.regno != inst_dest.reg.regno)
4976 	    {
4977 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4978 	      mismatch_detail->error = _("output register of preceding "
4979 					 "`movprfx' expected as output");
4980 	      mismatch_detail->index = 0;
4981 	      mismatch_detail->non_fatal = TRUE;
4982 	      res = ERR_VFI;
4983 	      goto done;
4984 	    }
4985 
4986 	  /* Operand used more than allowed for the specific opcode type.  */
4987 	  if (num_op_used > allowed_usage)
4988 	    {
4989 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4990 	      mismatch_detail->error = _("output register of preceding "
4991 					 "`movprfx' used as input");
4992 	      mismatch_detail->index = last_op_usage;
4993 	      mismatch_detail->non_fatal = TRUE;
4994 	      res = ERR_VFI;
4995 	      goto done;
4996 	    }
4997 
4998 	  /* Now the only thing left is the qualifiers checks.  The register
4999 	     must have the same maximum element size.  */
5000 	  if (inst_dest.qualifier
5001 	      && blk_dest.qualifier
5002 	      && current_elem_size
5003 		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5004 	    {
5005 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5006 	      mismatch_detail->error = _("register size not compatible with "
5007 					 "previous `movprfx'");
5008 	      mismatch_detail->index = 0;
5009 	      mismatch_detail->non_fatal = TRUE;
5010 	      res = ERR_VFI;
5011 	      goto done;
5012 	    }
5013 	}
5014 
5015     done:
5016       /* Add the new instruction to the sequence.  */
5017       memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5018 	      inst, sizeof (aarch64_inst));
5019 
5020       /* Check if sequence is now full.  */
5021       if (insn_sequence->next_insn >= insn_sequence->num_insns)
5022 	{
5023 	  /* Sequence is full, but we don't have anything special to do for now,
5024 	     so clear and reset it.  */
5025 	  init_insn_sequence (NULL, insn_sequence);
5026 	}
5027     }
5028 
5029   return res;
5030 }
5031 
5032 
5033 /* Return true if VALUE cannot be moved into an SVE register using DUP
5034    (with any element size, not just ESIZE) and if using DUPM would
5035    therefore be OK.  ESIZE is the number of bytes in the immediate.  */
5036 
bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element.  The shift is split
     into two halves so that ESIZE == 8 shifts by 32 + 32 rather than by
     64, which would be undefined behavior.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits outside the element must be either all clear or all set,
     i.e. UVALUE must be a zero- or sign-extended ESIZE-byte value.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest power-of-two width whose replication
     reproduces the element.  If the element is replicable down to byte
     granularity (or ESIZE is 1), DUP can always encode it, so DUPM is
     not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP takes a signed 8-bit immediate, optionally shifted left by 8
     (i.e. a multiple of 256); fold the shifted form away before the
     range check.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* TRUE when the value is outside DUP's signed 8-bit range, in which
     case DUPM is the instruction to use.  */
  return svalue < -128 || svalue >= 128;
}
5059 
5060 /* Include the opcode description table as well as the operand description
5061    table.  */
5062 #define VERIFIER(x) verify_##x
5063 #include "aarch64-tbl.h"
5064