xref: /netbsd-src/external/gpl3/binutils/dist/opcodes/aarch64-dis.c (revision ae87de8892f277bece3527c15b186ebcfa188227)
1 /* aarch64-dis.c -- AArch64 disassembler.
2    Copyright (C) 2009-2022 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 
29 #define INSNLEN 4
30 
31 /* Cached mapping symbol state.  */
32 enum map_type
33 {
34   MAP_INSN,
35   MAP_DATA
36 };
37 
38 static aarch64_feature_set arch_variant; /* See select_aarch64_variant.  */
39 static enum map_type last_type;
40 static int last_mapping_sym = -1;
41 static bfd_vma last_stop_offset = 0;
42 static bfd_vma last_mapping_addr = 0;
43 
44 /* Other options */
45 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
46 static int no_notes = 1;	/* If set, do not print disassembly notes in the
47 				  output as comments.  */
48 
49 /* Currently active instruction sequence.  */
50 static aarch64_instr_sequence insn_sequence;
51 
52 static void
53 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
54 {
55 }
56 
57 static void
58 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 {
60   /* Try to match options that are simple flags */
61   if (startswith (option, "no-aliases"))
62     {
63       no_aliases = 1;
64       return;
65     }
66 
67   if (startswith (option, "aliases"))
68     {
69       no_aliases = 0;
70       return;
71     }
72 
73   if (startswith (option, "no-notes"))
74     {
75       no_notes = 1;
76       return;
77     }
78 
79   if (startswith (option, "notes"))
80     {
81       no_notes = 0;
82       return;
83     }
84 
85 #ifdef DEBUG_AARCH64
86   if (startswith (option, "debug_dump"))
87     {
88       debug_dump = 1;
89       return;
90     }
91 #endif /* DEBUG_AARCH64 */
92 
93   /* Invalid option.  */
94   opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
95 }
96 
97 static void
98 parse_aarch64_dis_options (const char *options)
99 {
100   const char *option_end;
101 
102   if (options == NULL)
103     return;
104 
105   while (*options != '\0')
106     {
107       /* Skip empty options.  */
108       if (*options == ',')
109 	{
110 	  options++;
111 	  continue;
112 	}
113 
114       /* We know that *options is neither NUL nor a comma.  */
115       option_end = options + 1;
116       while (*option_end != ',' && *option_end != '\0')
117 	option_end++;
118 
119       parse_aarch64_dis_option (options, option_end - options);
120 
121       /* Go on to the next one.  If option_end points to a comma, it
122 	 will be skipped above.  */
123       options = option_end;
124     }
125 }
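/* A minimal usage sketch (illustrative only, compiled out): the options
   string normally arrives via disassemble_info from the caller, e.g. from
   an "objdump -M" style option list; the string literal used here is just
   an example.  Each comma-separated token is matched by prefix above, so
   "no-aliases" sets no_aliases and "notes" clears no_notes.  */
#if 0
static void
example_parse_options (void)
{
  parse_aarch64_dis_options ("no-aliases,notes");
}
#endif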
126 
127 /* Functions doing the instruction disassembling.  */
128 
129 /* The unnamed arguments consist of the number of fields and information about
130    these fields, which is used to extract VALUE from CODE and return it.
131    MASK can be zero or the base mask of the opcode.
132 
133    N.B. the fields are required to be in such an order that the most significant
134    field for VALUE comes first, e.g. the <index> in
135     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
136    is encoded in H:L:M in some cases; those fields should then be passed in
137    the order H, L, M.  */
138 
139 aarch64_insn
140 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
141 {
142   uint32_t num;
143   const aarch64_field *field;
144   enum aarch64_field_kind kind;
145   va_list va;
146 
147   va_start (va, mask);
148   num = va_arg (va, uint32_t);
149   assert (num <= 5);
150   aarch64_insn value = 0x0;
151   while (num--)
152     {
153       kind = va_arg (va, enum aarch64_field_kind);
154       field = &fields[kind];
155       value <<= field->width;
156       value |= extract_field (kind, code, mask);
157     }
158   va_end (va);
159   return value;
160 }
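/* An illustrative sketch (compiled out) of the calling convention described
   above: to recover a lane index encoded in H:L:M, the one-bit fields are
   passed most significant first, exactly as aarch64_ext_reglane does below
   for the Em16 case.  */
#if 0
static aarch64_insn
example_extract_hlm (aarch64_insn code)
{
  /* Returns the 3-bit value H:L:M extracted from CODE.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif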
161 
162 /* Extract the value of all fields in SELF->fields from instruction CODE.
163    The least significant bit comes from the final field.  */
164 
165 static aarch64_insn
166 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
167 {
168   aarch64_insn value;
169   unsigned int i;
170   enum aarch64_field_kind kind;
171 
172   value = 0;
173   for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
174     {
175       kind = self->fields[i];
176       value <<= fields[kind].width;
177       value |= extract_field (kind, code, 0);
178     }
179   return value;
180 }
181 
182 /* Sign-extend bit I of VALUE.  */
183 static inline uint64_t
184 sign_extend (aarch64_insn value, unsigned i)
185 {
186   uint64_t ret, sign;
187 
188   assert (i < 32);
189   ret = value;
190   sign = (uint64_t) 1 << i;
191   return ((ret & (sign + sign - 1)) ^ sign) - sign;
192 }
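/* For example (illustrative): a simm9 field holding 0x1f0 decodes as
   sign_extend (0x1f0, 8) == -16, since bit 8 is the sign bit of the
   9-bit two's-complement value.  */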
193 
194 /* N.B. the following inline helper functions create a dependency on the
195    order of operand qualifier enumerators.  */
196 
197 /* Given VALUE, return qualifier for a general purpose register.  */
198 static inline enum aarch64_opnd_qualifier
199 get_greg_qualifier_from_value (aarch64_insn value)
200 {
201   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
202   assert (value <= 0x1
203 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
204   return qualifier;
205 }
206 
207 /* Given VALUE, return qualifier for a vector register.  This does not support
208    decoding instructions that accept the 2H vector type.  */
209 
210 static inline enum aarch64_opnd_qualifier
211 get_vreg_qualifier_from_value (aarch64_insn value)
212 {
213   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
214 
215   /* Instructions using vector type 2H should not call this function.  Skip over
216      the 2H qualifier.  */
217   if (qualifier >= AARCH64_OPND_QLF_V_2H)
218     qualifier += 1;
219 
220   assert (value <= 0x8
221 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
222   return qualifier;
223 }
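/* For example (illustrative): in the function above, value 2 maps to
   AARCH64_OPND_QLF_V_4H and value 3 to AARCH64_OPND_QLF_V_8H, because the
   2H qualifier is skipped over.  */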
224 
225 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
226 static inline enum aarch64_opnd_qualifier
227 get_sreg_qualifier_from_value (aarch64_insn value)
228 {
229   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
230 
231   assert (value <= 0x4
232 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
233   return qualifier;
234 }
235 
236 /* Given the instruction in *INST, which is probably halfway through being
237    decoded, return the qualifier that the caller should expect for operand
238    I if we can establish it; otherwise return
239    AARCH64_OPND_QLF_NIL.  */
240 
241 static aarch64_opnd_qualifier_t
242 get_expected_qualifier (const aarch64_inst *inst, int i)
243 {
244   aarch64_opnd_qualifier_seq_t qualifiers;
245   /* Should not be called if the qualifier is known.  */
246   assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
247   if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
248 			       i, qualifiers))
249     return qualifiers[i];
250   else
251     return AARCH64_OPND_QLF_NIL;
252 }
253 
254 /* Operand extractors.  */
255 
256 bool
257 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
258 		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
259 		  const aarch64_insn code ATTRIBUTE_UNUSED,
260 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
261 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
262 {
263   return true;
264 }
265 
266 bool
267 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
268 		   const aarch64_insn code,
269 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
270 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
271 {
272   info->reg.regno = extract_field (self->fields[0], code, 0);
273   return true;
274 }
275 
276 bool
277 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
278 		   const aarch64_insn code ATTRIBUTE_UNUSED,
279 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
280 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
281 {
282   assert (info->idx == 1
283 	  || info->idx == 3);
284   info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
285   return true;
286 }
287 
288 /* e.g. IC <ic_op>{, <Xt>}.  */
289 bool
290 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
291 			  const aarch64_insn code,
292 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
293 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
294 {
295   info->reg.regno = extract_field (self->fields[0], code, 0);
296   assert (info->idx == 1
297 	  && (aarch64_get_operand_class (inst->operands[0].type)
298 	      == AARCH64_OPND_CLASS_SYSTEM));
299   /* This will make the constraint checking happy and more importantly will
300      help the disassembler determine whether this operand is optional or
301      not.  */
302   info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
303 
304   return true;
305 }
306 
307 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
308 bool
309 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
310 		     const aarch64_insn code,
311 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
312 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
313 {
314   /* regno */
315   info->reglane.regno = extract_field (self->fields[0], code,
316 				       inst->opcode->mask);
317 
318   /* Index and/or type.  */
319   if (inst->opcode->iclass == asisdone
320     || inst->opcode->iclass == asimdins)
321     {
322       if (info->type == AARCH64_OPND_En
323 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
324 	{
325 	  unsigned shift;
326 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
327 	  assert (info->idx == 1);	/* Vn */
328 	  aarch64_insn value = extract_field (FLD_imm4, code, 0);
329 	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
330 	  info->qualifier = get_expected_qualifier (inst, info->idx);
331 	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
332 	  info->reglane.index = value >> shift;
333 	}
334       else
335 	{
336 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
337 	     imm5<3:0>	<V>
338 	     0000	RESERVED
339 	     xxx1	B
340 	     xx10	H
341 	     x100	S
342 	     1000	D  */
343 	  int pos = -1;
344 	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
345 	  while (++pos <= 3 && (value & 0x1) == 0)
346 	    value >>= 1;
347 	  if (pos > 3)
348 	    return false;
349 	  info->qualifier = get_sreg_qualifier_from_value (pos);
350 	  info->reglane.index = (unsigned) (value >> 1);
351 	}
352     }
353   else if (inst->opcode->iclass == dotproduct)
354     {
355       /* Need information in other operand(s) to help decoding.  */
356       info->qualifier = get_expected_qualifier (inst, info->idx);
357       switch (info->qualifier)
358 	{
359 	case AARCH64_OPND_QLF_S_4B:
360 	case AARCH64_OPND_QLF_S_2H:
361 	  /* L:H */
362 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
363 	  info->reglane.regno &= 0x1f;
364 	  break;
365 	default:
366 	  return false;
367 	}
368     }
369   else if (inst->opcode->iclass == cryptosm3)
370     {
371       /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
372       info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
373     }
374   else
375     {
376       /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
377          or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
378 
379       /* Need information in other operand(s) to help decoding.  */
380       info->qualifier = get_expected_qualifier (inst, info->idx);
381       switch (info->qualifier)
382 	{
383 	case AARCH64_OPND_QLF_S_H:
384 	  if (info->type == AARCH64_OPND_Em16)
385 	    {
386 	      /* h:l:m */
387 	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
388 						    FLD_M);
389 	      info->reglane.regno &= 0xf;
390 	    }
391 	  else
392 	    {
393 	      /* h:l */
394 	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
395 	    }
396 	  break;
397 	case AARCH64_OPND_QLF_S_S:
398 	  /* h:l */
399 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
400 	  break;
401 	case AARCH64_OPND_QLF_S_D:
402 	  /* H */
403 	  info->reglane.index = extract_field (FLD_H, code, 0);
404 	  break;
405 	default:
406 	  return false;
407 	}
408 
409       if (inst->opcode->op == OP_FCMLA_ELEM
410 	  && info->qualifier != AARCH64_OPND_QLF_S_H)
411 	{
412 	  /* Complex operand takes two elements.  */
413 	  if (info->reglane.index & 1)
414 	    return false;
415 	  info->reglane.index /= 2;
416 	}
417     }
418 
419   return true;
420 }
421 
422 bool
423 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
424 		     const aarch64_insn code,
425 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
426 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
427 {
428   /* R */
429   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
430   /* len */
431   info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
432   return true;
433 }
434 
435 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
436 bool
437 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
438 			  aarch64_opnd_info *info, const aarch64_insn code,
439 			  const aarch64_inst *inst,
440 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
441 {
442   aarch64_insn value;
443   /* Number of elements in each structure to be loaded/stored.  */
444   unsigned expected_num = get_opcode_dependent_value (inst->opcode);
445 
446   struct
447     {
448       unsigned is_reserved;
449       unsigned num_regs;
450       unsigned num_elements;
451     } data [] =
452   {   {0, 4, 4},
453       {1, 4, 4},
454       {0, 4, 1},
455       {0, 4, 2},
456       {0, 3, 3},
457       {1, 3, 3},
458       {0, 3, 1},
459       {0, 1, 1},
460       {0, 2, 2},
461       {1, 2, 2},
462       {0, 2, 1},
463   };
464 
465   /* Rt */
466   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
467   /* opcode */
468   value = extract_field (FLD_opcode, code, 0);
469   /* PR 21595: Check for a bogus value.  */
470   if (value >= ARRAY_SIZE (data))
471     return false;
472   if (expected_num != data[value].num_elements || data[value].is_reserved)
473     return false;
474   info->reglist.num_regs = data[value].num_regs;
475 
476   return true;
477 }
478 
479 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
480    lanes instructions.  */
481 bool
482 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
483 			    aarch64_opnd_info *info, const aarch64_insn code,
484 			    const aarch64_inst *inst,
485 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
486 {
487   aarch64_insn value;
488 
489   /* Rt */
490   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
491   /* S */
492   value = extract_field (FLD_S, code, 0);
493 
494   /* Number of registers is equal to the number of elements in
495      each structure to be loaded/stored.  */
496   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
497   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
498 
499   /* Except when it is LD1R.  */
500   if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
501     info->reglist.num_regs = 2;
502 
503   return true;
504 }
505 
506 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
507    load/store single element instructions.  */
508 bool
509 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 			   aarch64_opnd_info *info, const aarch64_insn code,
511 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
512 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
513 {
514   aarch64_field field = {0, 0};
515   aarch64_insn QSsize;		/* fields Q:S:size.  */
516   aarch64_insn opcodeh2;	/* opcode<2:1> */
517 
518   /* Rt */
519   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
520 
521   /* Decode the index, opcode<2:1> and size.  */
522   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
523   opcodeh2 = extract_field_2 (&field, code, 0);
524   QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
525   switch (opcodeh2)
526     {
527     case 0x0:
528       info->qualifier = AARCH64_OPND_QLF_S_B;
529       /* Index encoded in "Q:S:size".  */
530       info->reglist.index = QSsize;
531       break;
532     case 0x1:
533       if (QSsize & 0x1)
534 	/* UND.  */
535 	return false;
536       info->qualifier = AARCH64_OPND_QLF_S_H;
537       /* Index encoded in "Q:S:size<1>".  */
538       info->reglist.index = QSsize >> 1;
539       break;
540     case 0x2:
541       if ((QSsize >> 1) & 0x1)
542 	/* UND.  */
543 	return false;
544       if ((QSsize & 0x1) == 0)
545 	{
546 	  info->qualifier = AARCH64_OPND_QLF_S_S;
547 	  /* Index encoded in "Q:S".  */
548 	  info->reglist.index = QSsize >> 2;
549 	}
550       else
551 	{
552 	  if (extract_field (FLD_S, code, 0))
553 	    /* UND */
554 	    return false;
555 	  info->qualifier = AARCH64_OPND_QLF_S_D;
556 	  /* Index encoded in "Q".  */
557 	  info->reglist.index = QSsize >> 3;
558 	}
559       break;
560     default:
561       return false;
562     }
563 
564   info->reglist.has_index = 1;
565   info->reglist.num_regs = 0;
566   /* Number of registers is equal to the number of elements in
567      each structure to be loaded/stored.  */
568   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
569   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
570 
571   return true;
572 }
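/* Worked example (illustrative) for the function above: for an H-element
   access, opcode<2:1> is 1 and size<0> must be 0, so Q:S:size = 0b1100
   gives index 0b1100 >> 1 == 6, i.e. something like
   LD1 { <Vt>.H }[6], [<Xn|SP>].  */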
573 
574 /* Decode fields immh:immb and/or Q for e.g.
575    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
576    or SSHR <V><d>, <V><n>, #<shift>.  */
577 
578 bool
579 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
580 			       aarch64_opnd_info *info, const aarch64_insn code,
581 			       const aarch64_inst *inst,
582 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
583 {
584   int pos;
585   aarch64_insn Q, imm, immh;
586   enum aarch64_insn_class iclass = inst->opcode->iclass;
587 
588   immh = extract_field (FLD_immh, code, 0);
589   if (immh == 0)
590     return false;
591   imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
592   pos = 4;
593   /* Get highest set bit in immh.  */
594   while (--pos >= 0 && (immh & 0x8) == 0)
595     immh <<= 1;
596 
597   assert ((iclass == asimdshf || iclass == asisdshf)
598 	  && (info->type == AARCH64_OPND_IMM_VLSR
599 	      || info->type == AARCH64_OPND_IMM_VLSL));
600 
601   if (iclass == asimdshf)
602     {
603       Q = extract_field (FLD_Q, code, 0);
604       /* immh	Q	<T>
605 	 0000	x	SEE AdvSIMD modified immediate
606 	 0001	0	8B
607 	 0001	1	16B
608 	 001x	0	4H
609 	 001x	1	8H
610 	 01xx	0	2S
611 	 01xx	1	4S
612 	 1xxx	0	RESERVED
613 	 1xxx	1	2D  */
614       info->qualifier =
615 	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
616     }
617   else
618     info->qualifier = get_sreg_qualifier_from_value (pos);
619 
620   if (info->type == AARCH64_OPND_IMM_VLSR)
621     /* immh	<shift>
622        0000	SEE AdvSIMD modified immediate
623        0001	(16-UInt(immh:immb))
624        001x	(32-UInt(immh:immb))
625        01xx	(64-UInt(immh:immb))
626        1xxx	(128-UInt(immh:immb))  */
627     info->imm.value = (16 << pos) - imm;
628   else
629     /* immh:immb
630        immh	<shift>
631        0000	SEE AdvSIMD modified immediate
632        0001	(UInt(immh:immb)-8)
633        001x	(UInt(immh:immb)-16)
634        01xx	(UInt(immh:immb)-32)
635        1xxx	(UInt(immh:immb)-64)  */
636     info->imm.value = imm - (8 << pos);
637 
638   return true;
639 }
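/* Worked example (illustrative) for the function above: for SSHR with
   immh:immb = 0b0010101 the leading-one search stops with pos == 1, so the
   vector type is 4H (Q == 0) or 8H (Q == 1) and the right-shift amount is
   (16 << 1) - 21 == 11.  */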
640 
641 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
642 bool
643 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
644 		      aarch64_opnd_info *info, const aarch64_insn code,
645 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
646 		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
647 {
648   int64_t imm;
649   aarch64_insn val;
650   val = extract_field (FLD_size, code, 0);
651   switch (val)
652     {
653     case 0: imm = 8; break;
654     case 1: imm = 16; break;
655     case 2: imm = 32; break;
656     default: return false;
657     }
658   info->imm.value = imm;
659   return true;
660 }
661 
662 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
663    The value in the field(s) is extracted as an unsigned immediate value.  */
664 bool
665 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
666 		 const aarch64_insn code,
667 		 const aarch64_inst *inst,
668 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
669 {
670   uint64_t imm;
671 
672   imm = extract_all_fields (self, code);
673 
674   if (operand_need_sign_extension (self))
675     imm = sign_extend (imm, get_operand_fields_width (self) - 1);
676 
677   if (operand_need_shift_by_two (self))
678     imm <<= 2;
679   else if (operand_need_shift_by_four (self))
680     imm <<= 4;
681 
682   if (info->type == AARCH64_OPND_ADDR_ADRP)
683     imm <<= 12;
684 
685   if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
686       && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
687     imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
688 
689   info->imm.value = imm;
690   return true;
691 }
692 
693 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
694 bool
695 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
696 		      const aarch64_insn code,
697 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
698 		      aarch64_operand_error *errors)
699 {
700   aarch64_ext_imm (self, info, code, inst, errors);
701   info->shifter.kind = AARCH64_MOD_LSL;
702   info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
703   return true;
704 }
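/* For example (illustrative): in the function above, hw == 2 gives a
   shifter of LSL #32, as in MOVZ <Xd>, #<imm16>, LSL #32.  */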
705 
706 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
707      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
708 bool
709 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
710 				  aarch64_opnd_info *info,
711 				  const aarch64_insn code,
712 				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
713 				  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
714 {
715   uint64_t imm;
716   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
717   aarch64_field field = {0, 0};
718 
719   assert (info->idx == 1);
720 
721   if (info->type == AARCH64_OPND_SIMD_FPIMM)
722     info->imm.is_fp = 1;
723 
724   /* a:b:c:d:e:f:g:h */
725   imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
726   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
727     {
728       /* Either MOVI <Dd>, #<imm>
729 	 or     MOVI <Vd>.2D, #<imm>.
730 	 <imm> is a 64-bit immediate
731 	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
732 	 encoded in "a:b:c:d:e:f:g:h".	*/
733       int i;
734       unsigned abcdefgh = imm;
735       for (imm = 0ull, i = 0; i < 8; i++)
736 	if (((abcdefgh >> i) & 0x1) != 0)
737 	  imm |= 0xffull << (8 * i);
738     }
739   info->imm.value = imm;
740 
741   /* cmode */
742   info->qualifier = get_expected_qualifier (inst, info->idx);
743   switch (info->qualifier)
744     {
745     case AARCH64_OPND_QLF_NIL:
746       /* no shift */
747       info->shifter.kind = AARCH64_MOD_NONE;
748       return true;
749     case AARCH64_OPND_QLF_LSL:
750       /* shift zeros */
751       info->shifter.kind = AARCH64_MOD_LSL;
752       switch (aarch64_get_qualifier_esize (opnd0_qualifier))
753 	{
754 	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
755 	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
756 	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
757 	default: return false;
758 	}
759       /* 00: 0; 01: 8; 10:16; 11:24.  */
760       info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
761       break;
762     case AARCH64_OPND_QLF_MSL:
763       /* shift ones */
764       info->shifter.kind = AARCH64_MOD_MSL;
765       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
766       info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
767       break;
768     default:
769       return false;
770     }
771 
772   return true;
773 }
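/* Worked example (illustrative): for MOVI <Vd>.2D the byte-replication loop
   above expands a:b:c:d:e:f:g:h = 0b10000001 into the 64-bit immediate
   0xff000000000000ff.  */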
774 
775 /* Decode an 8-bit floating-point immediate.  */
776 bool
777 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
778 		   const aarch64_insn code,
779 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
780 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
781 {
782   info->imm.value = extract_all_fields (self, code);
783   info->imm.is_fp = 1;
784   return true;
785 }
786 
787 /* Decode a 1-bit rotate immediate (#90 or #270).  */
788 bool
789 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
790 			 const aarch64_insn code,
791 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
792 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
793 {
794   uint64_t rot = extract_field (self->fields[0], code, 0);
795   assert (rot < 2U);
796   info->imm.value = rot * 180 + 90;
797   return true;
798 }
799 
800 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270).  */
801 bool
802 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
803 			 const aarch64_insn code,
804 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
805 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
806 {
807   uint64_t rot = extract_field (self->fields[0], code, 0);
808   assert (rot < 4U);
809   info->imm.value = rot * 90;
810   return true;
811 }
812 
813 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
814 bool
815 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
816 		   aarch64_opnd_info *info, const aarch64_insn code,
817 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
818 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
819 {
820   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
821   return true;
822 }
823 
824 /* Decode arithmetic immediate for e.g.
825      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
826 bool
827 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
828 		  aarch64_opnd_info *info, const aarch64_insn code,
829 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
830 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
831 {
832   aarch64_insn value;
833 
834   info->shifter.kind = AARCH64_MOD_LSL;
835   /* shift */
836   value = extract_field (FLD_shift, code, 0);
837   if (value >= 2)
838     return false;
839   info->shifter.amount = value ? 12 : 0;
840   /* imm12 (unsigned) */
841   info->imm.value = extract_field (FLD_imm12, code, 0);
842 
843   return true;
844 }
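/* For example (illustrative): in the function above, a shift field of 1
   with imm12 = 0x123 decodes as #0x123, LSL #12, as in
   SUBS <Wd>, <Wn|WSP>, #0x123, LSL #12.  */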
845 
846 /* Return true if VALUE is a valid logical immediate encoding, storing the
847    decoded value in *RESULT if so.  ESIZE is the number of bytes in the
848    decoded immediate.  */
849 static bool
850 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
851 {
852   uint64_t imm, mask;
853   uint32_t N, R, S;
854   unsigned simd_size;
855 
856   /* value is N:immr:imms.  */
857   S = value & 0x3f;
858   R = (value >> 6) & 0x3f;
859   N = (value >> 12) & 0x1;
860 
861   /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
862      (in other words, right rotated by R), then replicated.  */
863   if (N != 0)
864     {
865       simd_size = 64;
866       mask = 0xffffffffffffffffull;
867     }
868   else
869     {
870       switch (S)
871 	{
872 	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
873 	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
874 	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
875 	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
876 	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
877 	default: return false;
878 	}
879       mask = (1ull << simd_size) - 1;
880       /* Top bits are IGNORED.  */
881       R &= simd_size - 1;
882     }
883 
884   if (simd_size > esize * 8)
885     return false;
886 
887   /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
888   if (S == simd_size - 1)
889     return false;
890   /* S+1 consecutive bits to 1.  */
891   /* NOTE: S can't be 63 due to detection above.  */
892   imm = (1ull << (S + 1)) - 1;
893   /* Rotate to the left by simd_size - R.  */
894   if (R != 0)
895     imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
896   /* Replicate the value according to SIMD size.  */
897   switch (simd_size)
898     {
899     case  2: imm = (imm <<  2) | imm;
900       /* Fall through.  */
901     case  4: imm = (imm <<  4) | imm;
902       /* Fall through.  */
903     case  8: imm = (imm <<  8) | imm;
904       /* Fall through.  */
905     case 16: imm = (imm << 16) | imm;
906       /* Fall through.  */
907     case 32: imm = (imm << 32) | imm;
908       /* Fall through.  */
909     case 64: break;
910     default: return false;
911     }
912 
913   *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
914 
915   return true;
916 }
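/* An illustrative sketch (compiled out): the all-zero N:immr:imms pattern
   decodes to the immediate 1 for a 32-bit operand, i.e. the encoding used
   by ORR <Wd|WSP>, <Wn>, #1.  */
#if 0
static void
example_decode_limm (void)
{
  int64_t imm;
  /* esize 4 (a 32-bit element), N:immr:imms = 0.  */
  if (decode_limm (4, 0, &imm))
    assert (imm == 1);
}
#endif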
917 
918 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
919 bool
920 aarch64_ext_limm (const aarch64_operand *self,
921 		  aarch64_opnd_info *info, const aarch64_insn code,
922 		  const aarch64_inst *inst,
923 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
924 {
925   uint32_t esize;
926   aarch64_insn value;
927 
928   value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
929 			  self->fields[2]);
930   esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
931   return decode_limm (esize, value, &info->imm.value);
932 }
933 
934 /* Decode a logical immediate for the BIC alias of AND (etc.).  */
935 bool
936 aarch64_ext_inv_limm (const aarch64_operand *self,
937 		      aarch64_opnd_info *info, const aarch64_insn code,
938 		      const aarch64_inst *inst,
939 		      aarch64_operand_error *errors)
940 {
941   if (!aarch64_ext_limm (self, info, code, inst, errors))
942     return false;
943   info->imm.value = ~info->imm.value;
944   return true;
945 }
946 
947 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
948    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
949 bool
950 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
951 		aarch64_opnd_info *info,
952 		const aarch64_insn code, const aarch64_inst *inst,
953 		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
954 {
955   aarch64_insn value;
956 
957   /* Rt */
958   info->reg.regno = extract_field (FLD_Rt, code, 0);
959 
960   /* size */
961   value = extract_field (FLD_ldst_size, code, 0);
962   if (inst->opcode->iclass == ldstpair_indexed
963       || inst->opcode->iclass == ldstnapair_offs
964       || inst->opcode->iclass == ldstpair_off
965       || inst->opcode->iclass == loadlit)
966     {
967       enum aarch64_opnd_qualifier qualifier;
968       switch (value)
969 	{
970 	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
971 	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
972 	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
973 	default: return false;
974 	}
975       info->qualifier = qualifier;
976     }
977   else
978     {
979       /* opc1:size */
980       value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
981       if (value > 0x4)
982 	return false;
983       info->qualifier = get_sreg_qualifier_from_value (value);
984     }
985 
986   return true;
987 }
988 
989 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
990 bool
991 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
992 			 aarch64_opnd_info *info,
993 			 aarch64_insn code,
994 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
995 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
996 {
997   /* Rn */
998   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
999   return true;
1000 }
1001 
1002 /* Decode the address operand for e.g.
1003      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
1004 bool
1005 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1006 			 aarch64_opnd_info *info,
1007 			 aarch64_insn code, const aarch64_inst *inst,
1008 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1009 {
1010   info->qualifier = get_expected_qualifier (inst, info->idx);
1011 
1012   /* Rn */
1013   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1014 
1015   /* simm9 */
1016   aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1017   info->addr.offset.imm = sign_extend (imm, 8);
1018   if (extract_field (self->fields[2], code, 0) == 1) {
1019     info->addr.writeback = 1;
1020     info->addr.preind = 1;
1021   }
1022   return true;
1023 }
1024 
1025 /* Decode the address operand for e.g.
1026      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1027 bool
1028 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1029 			 aarch64_opnd_info *info,
1030 			 aarch64_insn code, const aarch64_inst *inst,
1031 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1032 {
1033   aarch64_insn S, value;
1034 
1035   /* Rn */
1036   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1037   /* Rm */
1038   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1039   /* option */
1040   value = extract_field (FLD_option, code, 0);
1041   info->shifter.kind =
1042     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1043   /* Fix-up the shifter kind; although the table-driven approach is
1044      efficient, it is slightly inflexible, thus needing this fix-up.  */
1045   if (info->shifter.kind == AARCH64_MOD_UXTX)
1046     info->shifter.kind = AARCH64_MOD_LSL;
1047   /* S */
1048   S = extract_field (FLD_S, code, 0);
1049   if (S == 0)
1050     {
1051       info->shifter.amount = 0;
1052       info->shifter.amount_present = 0;
1053     }
1054   else
1055     {
1056       int size;
1057       /* Need information in other operand(s) to help achieve the decoding
1058 	 from 'S' field.  */
1059       info->qualifier = get_expected_qualifier (inst, info->idx);
1060       /* Get the size of the data element that is accessed, which may be
1061 	 different from that of the source register size, e.g. in strb/ldrb.  */
1062       size = aarch64_get_qualifier_esize (info->qualifier);
1063       info->shifter.amount = get_logsz (size);
1064       info->shifter.amount_present = 1;
1065     }
1066 
1067   return true;
1068 }
1069 
1070 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
1071 bool
1072 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1073 		       aarch64_insn code, const aarch64_inst *inst,
1074 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1075 {
1076   aarch64_insn imm;
1077   info->qualifier = get_expected_qualifier (inst, info->idx);
1078 
1079   /* Rn */
1080   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1081   /* simm (imm9 or imm7)  */
1082   imm = extract_field (self->fields[0], code, 0);
1083   info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1084   if (self->fields[0] == FLD_imm7
1085       || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1086     /* scaled immediate in ld/st pair instructions.  */
1087     info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1088   /* qualifier */
1089   if (inst->opcode->iclass == ldst_unscaled
1090       || inst->opcode->iclass == ldstnapair_offs
1091       || inst->opcode->iclass == ldstpair_off
1092       || inst->opcode->iclass == ldst_unpriv)
1093     info->addr.writeback = 0;
1094   else
1095     {
1096       /* pre/post- index */
1097       info->addr.writeback = 1;
1098       if (extract_field (self->fields[1], code, 0) == 1)
1099 	info->addr.preind = 1;
1100       else
1101 	info->addr.postind = 1;
1102     }
1103 
1104   return true;
1105 }
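/* Worked example (illustrative) for the function above: in
   LDP <Qt1>, <Qt2>, [<Xn|SP>, #<imm>], an imm7 field of 0x7f sign-extends
   to -1 and is then scaled by the 16-byte element size, giving an offset
   of -16.  */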
1106 
1107 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
1108 bool
1109 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1110 			 aarch64_insn code,
1111 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1112 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1113 {
1114   int shift;
1115   info->qualifier = get_expected_qualifier (inst, info->idx);
1116   shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1117   /* Rn */
1118   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1119   /* uimm12 */
1120   info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1121   return true;
1122 }
1123 
1124 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
1125 bool
1126 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1127 			 aarch64_insn code,
1128 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1129 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1130 {
1131   aarch64_insn imm;
1132 
1133   info->qualifier = get_expected_qualifier (inst, info->idx);
1134   /* Rn */
1135   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1136   /* simm10 */
1137   imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1138   info->addr.offset.imm = sign_extend (imm, 9) << 3;
1139   if (extract_field (self->fields[3], code, 0) == 1) {
1140     info->addr.writeback = 1;
1141     info->addr.preind = 1;
1142   }
1143   return true;
1144 }
1145 
1146 /* Decode the address operand for e.g.
1147      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
1148 bool
1149 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1150 			    aarch64_opnd_info *info,
1151 			    aarch64_insn code, const aarch64_inst *inst,
1152 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1153 {
1154   /* The opcode dependent area stores the number of elements in
1155      each structure to be loaded/stored.  */
1156   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1157 
1158   /* Rn */
1159   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1160   /* Rm | #<amount>  */
1161   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1162   if (info->addr.offset.regno == 31)
1163     {
1164       if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1165 	/* Special handling of loading a single structure to all lanes.  */
1166 	info->addr.offset.imm = (is_ld1r ? 1
1167 				 : inst->operands[0].reglist.num_regs)
1168 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1169       else
1170 	info->addr.offset.imm = inst->operands[0].reglist.num_regs
1171 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1172 	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1173     }
1174   else
1175     info->addr.offset.is_reg = 1;
1176   info->addr.writeback = 1;
1177 
1178   return true;
1179 }
1180 
1181 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
1182 bool
1183 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1184 		  aarch64_opnd_info *info,
1185 		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1186 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1187 {
1188   aarch64_insn value;
1189   /* cond */
1190   value = extract_field (FLD_cond, code, 0);
1191   info->cond = get_cond_from_value (value);
1192   return true;
1193 }
1194 
1195 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
1196 bool
1197 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1198 		    aarch64_opnd_info *info,
1199 		    aarch64_insn code,
1200 		    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1201 		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1202 {
1203   /* op0:op1:CRn:CRm:op2 */
1204   info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1205 				       FLD_CRm, FLD_op2);
1206   info->sysreg.flags = 0;
1207 
1208   /* If this is a system instruction, work out which restrictions apply to the
1209      register value during decoding; they will be enforced then.  */
1210   if (inst->opcode->iclass == ic_system)
1211     {
1212       /* Check whether it's read-only, then whether it's write-only.
1213 	 If it's both or unspecified, we don't care.  */
1214       if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1215 	info->sysreg.flags = F_REG_READ;
1216       else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1217 	       == F_SYS_WRITE)
1218 	info->sysreg.flags = F_REG_WRITE;
1219     }
1220 
1221   return true;
1222 }
1223 
1224 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
1225 bool
1226 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1227 			 aarch64_opnd_info *info, aarch64_insn code,
1228 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1229 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1230 {
1231   int i;
1232   aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
1233   /* op1:op2 */
1234   info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1235   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1236     if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1237       {
1238         /* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
1239         uint32_t flags = aarch64_pstatefields[i].flags;
1240         if ((flags & F_REG_IN_CRM)
1241             && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
1242           continue;
1243         info->sysreg.flags = flags;
1244         return true;
1245       }
1246   /* Reserved value in <pstatefield>.  */
1247   return false;
1248 }
1249 
1250 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
1251 bool
1252 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1253 		       aarch64_opnd_info *info,
1254 		       aarch64_insn code,
1255 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1256 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1257 {
1258   int i;
1259   aarch64_insn value;
1260   const aarch64_sys_ins_reg *sysins_ops;
1261   /* op0:op1:CRn:CRm:op2 */
1262   value = extract_fields (code, 0, 5,
1263 			  FLD_op0, FLD_op1, FLD_CRn,
1264 			  FLD_CRm, FLD_op2);
1265 
1266   switch (info->type)
1267     {
1268     case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1269     case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1270     case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1271     case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1272     case AARCH64_OPND_SYSREG_SR:
1273 	sysins_ops = aarch64_sys_regs_sr;
1274 	 /* Let's remove op2 for rctx.  Refer to comments in the definition of
1275 	    aarch64_sys_regs_sr[].  */
1276 	value = value & ~(0x7);
1277 	break;
1278     default: return false;
1279     }
1280 
1281   for (i = 0; sysins_ops[i].name != NULL; ++i)
1282     if (sysins_ops[i].value == value)
1283       {
1284 	info->sysins_op = sysins_ops + i;
1285 	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1286 		     info->sysins_op->name,
1287 		     (unsigned)info->sysins_op->value,
1288 		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1289 	return true;
1290       }
1291 
1292   return false;
1293 }
1294 
1295 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
1296 
1297 bool
1298 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1299 		     aarch64_opnd_info *info,
1300 		     aarch64_insn code,
1301 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1302 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1303 {
1304   /* CRm */
1305   info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1306   return true;
1307 }
1308 
1309 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>.  */
1310 
1311 bool
1312 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1313 		     aarch64_opnd_info *info,
1314 		     aarch64_insn code,
1315 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1316 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1317 {
1318   /* For the DSB nXS barrier variant, the immediate is encoded in a 2-bit field.  */
1319   aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1320   info->barrier = aarch64_barrier_dsb_nxs_options + field;
1321   return true;
1322 }
1323 
1324 /* Decode the prefetch operation option operand for e.g.
1325      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
1326 
1327 bool
1328 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1329 		   aarch64_opnd_info *info,
1330 		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1331 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1332 {
1333   /* prfop in Rt */
1334   info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1335   return true;
1336 }
1337 
1338 /* Decode the hint number for an alias taking an operand.  Set info->hint_option
1339    to the matching name/value pair in aarch64_hint_options.  */
1340 
1341 bool
1342 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1343 		  aarch64_opnd_info *info,
1344 		  aarch64_insn code,
1345 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1346 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1347 {
1348   /* CRm:op2.  */
1349   unsigned hint_number;
1350   int i;
1351 
1352   hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1353 
1354   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1355     {
1356       if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1357 	{
1358 	  info->hint_option = &(aarch64_hint_options[i]);
1359 	  return true;
1360 	}
1361     }
1362 
1363   return false;
1364 }
1365 
1366 /* Decode the extended register operand for e.g.
1367      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1368 bool
1369 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1370 			  aarch64_opnd_info *info,
1371 			  aarch64_insn code,
1372 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1373 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1374 {
1375   aarch64_insn value;
1376 
1377   /* Rm */
1378   info->reg.regno = extract_field (FLD_Rm, code, 0);
1379   /* option */
1380   value = extract_field (FLD_option, code, 0);
1381   info->shifter.kind =
1382     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1383   /* imm3 */
1384   info->shifter.amount = extract_field (FLD_imm3, code,  0);
1385 
1386   /* This makes the constraint checking happy.  */
1387   info->shifter.operator_present = 1;
1388 
1389   /* Assume inst->operands[0].qualifier has been resolved.  */
1390   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1391   info->qualifier = AARCH64_OPND_QLF_W;
1392   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1393       && (info->shifter.kind == AARCH64_MOD_UXTX
1394 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1395     info->qualifier = AARCH64_OPND_QLF_X;
1396 
1397   return true;
1398 }
1399 
1400 /* Decode the shifted register operand for e.g.
1401      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1402 bool
1403 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1404 			 aarch64_opnd_info *info,
1405 			 aarch64_insn code,
1406 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1407 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1408 {
1409   aarch64_insn value;
1410 
1411   /* Rm */
1412   info->reg.regno = extract_field (FLD_Rm, code, 0);
1413   /* shift */
1414   value = extract_field (FLD_shift, code, 0);
1415   info->shifter.kind =
1416     aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1417   if (info->shifter.kind == AARCH64_MOD_ROR
1418       && inst->opcode->iclass != log_shift)
1419     /* ROR is not available for the shifted register operand in arithmetic
1420        instructions.  */
1421     return false;
1422   /* imm6 */
1423   info->shifter.amount = extract_field (FLD_imm6, code,  0);
1424 
1425   /* This makes the constraint checking happy.  */
1426   info->shifter.operator_present = 1;
1427 
1428   return true;
1429 }
1430 
1431 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1432    where <offset> is given by the OFFSET parameter and where <factor> is
1433    1 plus SELF's operand-dependent value.  fields[0] specifies the field
1434    that holds <base>.  */
1435 static bool
1436 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1437 				 aarch64_opnd_info *info, aarch64_insn code,
1438 				 int64_t offset)
1439 {
1440   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1441   info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1442   info->addr.offset.is_reg = false;
1443   info->addr.writeback = false;
1444   info->addr.preind = true;
1445   if (offset != 0)
1446     info->shifter.kind = AARCH64_MOD_MUL_VL;
1447   info->shifter.amount = 1;
1448   info->shifter.operator_present = (info->addr.offset.imm != 0);
1449   info->shifter.amount_present = false;
1450   return true;
1451 }
1452 
1453 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1454    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1455    SELF's operand-dependent value.  fields[0] specifies the field that
1456    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
1457 bool
1458 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1459 			       aarch64_opnd_info *info, aarch64_insn code,
1460 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1461 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1462 {
1463   int offset;
1464 
1465   offset = extract_field (FLD_SVE_imm4, code, 0);
1466   offset = ((offset + 8) & 15) - 8;
1467   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1468 }
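/* For example (illustrative): an SVE_imm4 field of 0xf wraps to -1 in the
   function above; if SELF's operand-dependent value is 1 the factor is 2,
   so the decoded offset would be printed as #-2, MUL VL.  */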
1469 
1470 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1471    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1472    SELF's operand-dependent value.  fields[0] specifies the field that
1473    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
1474 bool
1475 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1476 			       aarch64_opnd_info *info, aarch64_insn code,
1477 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1478 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1479 {
1480   int offset;
1481 
1482   offset = extract_field (FLD_SVE_imm6, code, 0);
1483   offset = (((offset + 32) & 63) - 32);
1484   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1485 }
1486 
1487 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1488    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1489    SELF's operand-dependent value.  fields[0] specifies the field that
1490    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
1491    and imm3 fields, with imm3 being the less-significant part.  */
1492 bool
1493 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1494 			       aarch64_opnd_info *info,
1495 			       aarch64_insn code,
1496 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1497 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1498 {
1499   int offset;
1500 
1501   offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1502   offset = (((offset + 256) & 511) - 256);
1503   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1504 }
1505 
1506 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1507    is given by the OFFSET parameter and where <shift> is SELF's operand-
1508    dependent value.  fields[0] specifies the base register field <base>.  */
1509 static bool
1510 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1511 			      aarch64_opnd_info *info, aarch64_insn code,
1512 			      int64_t offset)
1513 {
1514   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1515   info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1516   info->addr.offset.is_reg = false;
1517   info->addr.writeback = false;
1518   info->addr.preind = true;
1519   info->shifter.operator_present = false;
1520   info->shifter.amount_present = false;
1521   return true;
1522 }
1523 
1524 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1525    is a 4-bit signed number and where <shift> is SELF's operand-dependent
1526    value.  fields[0] specifies the base register field.  */
1527 bool
1528 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1529 			    aarch64_opnd_info *info, aarch64_insn code,
1530 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1531 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1532 {
1533   int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1534   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1535 }
1536 
1537 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1538    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1539    value.  fields[0] specifies the base register field.  */
1540 bool
1541 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1542 			    aarch64_opnd_info *info, aarch64_insn code,
1543 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1544 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1545 {
1546   int offset = extract_field (FLD_SVE_imm6, code, 0);
1547   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1548 }
1549 
1550 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1551    is SELF's operand-dependent value.  fields[0] specifies the base
1552    register field and fields[1] specifies the offset register field.  */
1553 bool
1554 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1555 			     aarch64_opnd_info *info, aarch64_insn code,
1556 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1557 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1558 {
1559   int index_regno;
1560 
1561   index_regno = extract_field (self->fields[1], code, 0);
1562   if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1563     return false;
1564 
1565   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1566   info->addr.offset.regno = index_regno;
1567   info->addr.offset.is_reg = true;
1568   info->addr.writeback = false;
1569   info->addr.preind = true;
1570   info->shifter.kind = AARCH64_MOD_LSL;
1571   info->shifter.amount = get_operand_specific_data (self);
1572   info->shifter.operator_present = (info->shifter.amount != 0);
1573   info->shifter.amount_present = (info->shifter.amount != 0);
1574   return true;
1575 }
1576 
1577 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1578    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1579    base register field, fields[1] specifies the offset register field and
1580    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1581 bool
1582 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1583 			     aarch64_opnd_info *info, aarch64_insn code,
1584 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1585 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1586 {
1587   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1588   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1589   info->addr.offset.is_reg = true;
1590   info->addr.writeback = false;
1591   info->addr.preind = true;
1592   if (extract_field (self->fields[2], code, 0))
1593     info->shifter.kind = AARCH64_MOD_SXTW;
1594   else
1595     info->shifter.kind = AARCH64_MOD_UXTW;
1596   info->shifter.amount = get_operand_specific_data (self);
1597   info->shifter.operator_present = true;
1598   info->shifter.amount_present = (info->shifter.amount != 0);
1599   return true;
1600 }
1601 
1602 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1603    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1604    fields[0] specifies the base register field.  */
1605 bool
1606 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1607 			    aarch64_opnd_info *info, aarch64_insn code,
1608 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1609 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1610 {
1611   int offset = extract_field (FLD_imm5, code, 0);
1612   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1613 }
1614 
1615 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1616    where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1617    number.  fields[0] specifies the base register field and fields[1]
1618    specifies the offset register field.  */
1619 static bool
1620 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1621 			 aarch64_insn code, enum aarch64_modifier_kind kind)
1622 {
1623   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1624   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1625   info->addr.offset.is_reg = true;
1626   info->addr.writeback = false;
1627   info->addr.preind = true;
1628   info->shifter.kind = kind;
1629   info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1630   info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1631 				    || info->shifter.amount != 0);
1632   info->shifter.amount_present = (info->shifter.amount != 0);
1633   return true;
1634 }
1635 
1636 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1637    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1638    field and fields[1] specifies the offset register field.  */
1639 bool
1640 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1641 			     aarch64_opnd_info *info, aarch64_insn code,
1642 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1643 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1644 {
1645   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1646 }
1647 
1648 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1649    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1650    field and fields[1] specifies the offset register field.  */
1651 bool
1652 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1653 			      aarch64_opnd_info *info, aarch64_insn code,
1654 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1655 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1656 {
1657   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1658 }
1659 
1660 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1661    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1662    field and fields[1] specifies the offset register field.  */
1663 bool
1664 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1665 			      aarch64_opnd_info *info, aarch64_insn code,
1666 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1667 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1668 {
1669   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1670 }
1671 
1672 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1673    has the raw field value and that the low 8 bits decode to VALUE.  */
1674 static bool
1675 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1676 {
1677   info->shifter.kind = AARCH64_MOD_LSL;
1678   info->shifter.amount = 0;
1679   if (info->imm.value & 0x100)
1680     {
1681       if (value == 0)
1682 	/* Decode 0x100 as #0, LSL #8.  */
1683 	info->shifter.amount = 8;
1684       else
1685 	value *= 256;
1686     }
1687   info->shifter.operator_present = (info->shifter.amount != 0);
1688   info->shifter.amount_present = (info->shifter.amount != 0);
1689   info->imm.value = value;
1690   return true;
1691 }
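
/* Worked example for the decoding above (illustrative): viewing the raw
   9-bit field as sh:imm8,
     0x001 decodes to #1,
     0x101 decodes to #256        (imm8 = 1 scaled by 256),
     0x100 decodes to #0, LSL #8  (kept distinct from a plain #0).  */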
1692 
1693 /* Decode an SVE ADD/SUB immediate.  */
1694 bool
1695 aarch64_ext_sve_aimm (const aarch64_operand *self,
1696 		      aarch64_opnd_info *info, const aarch64_insn code,
1697 		      const aarch64_inst *inst,
1698 		      aarch64_operand_error *errors)
1699 {
1700   return (aarch64_ext_imm (self, info, code, inst, errors)
1701 	  && decode_sve_aimm (info, (uint8_t) info->imm.value));
1702 }
1703 
1704 /* Decode an SVE CPY/DUP immediate.  */
1705 bool
1706 aarch64_ext_sve_asimm (const aarch64_operand *self,
1707 		       aarch64_opnd_info *info, const aarch64_insn code,
1708 		       const aarch64_inst *inst,
1709 		       aarch64_operand_error *errors)
1710 {
1711   return (aarch64_ext_imm (self, info, code, inst, errors)
1712 	  && decode_sve_aimm (info, (int8_t) info->imm.value));
1713 }
1714 
1715 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1716    The fields array specifies which field to use.  */
1717 bool
1718 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1719 				aarch64_opnd_info *info, aarch64_insn code,
1720 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1721 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1722 {
1723   if (extract_field (self->fields[0], code, 0))
1724     info->imm.value = 0x3f800000;
1725   else
1726     info->imm.value = 0x3f000000;
1727   info->imm.is_fp = true;
1728   return true;
1729 }
1730 
1731 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1732    The fields array specifies which field to use.  */
1733 bool
1734 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1735 				aarch64_opnd_info *info, aarch64_insn code,
1736 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1737 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1738 {
1739   if (extract_field (self->fields[0], code, 0))
1740     info->imm.value = 0x40000000;
1741   else
1742     info->imm.value = 0x3f000000;
1743   info->imm.is_fp = true;
1744   return true;
1745 }
1746 
1747 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1748    The fields array specifies which field to use.  */
1749 bool
1750 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1751 				aarch64_opnd_info *info, aarch64_insn code,
1752 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1753 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1754 {
1755   if (extract_field (self->fields[0], code, 0))
1756     info->imm.value = 0x3f800000;
1757   else
1758     info->imm.value = 0x0;
1759   info->imm.is_fp = true;
1760   return true;
1761 }
1762 
1763 /* Decode the ZA tile vector, vector indicator, vector selector, qualifier and
1764    immediate of numerous SME instructions such as MOVA.  */
1765 bool
1766 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
1767                              aarch64_opnd_info *info, aarch64_insn code,
1768                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
1769                              aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1770 {
1771   int fld_size = extract_field (self->fields[0], code, 0);
1772   int fld_q = extract_field (self->fields[1], code, 0);
1773   int fld_v = extract_field (self->fields[2], code, 0);
1774   int fld_rv = extract_field (self->fields[3], code, 0);
1775   int fld_zan_imm = extract_field (self->fields[4], code, 0);
1776 
1777   /* Deduce qualifier encoded in size and Q fields.  */
1778   if (fld_size == 0)
1779     info->qualifier = AARCH64_OPND_QLF_S_B;
1780   else if (fld_size == 1)
1781     info->qualifier = AARCH64_OPND_QLF_S_H;
1782   else if (fld_size == 2)
1783     info->qualifier = AARCH64_OPND_QLF_S_S;
1784   else if (fld_size == 3 && fld_q == 0)
1785     info->qualifier = AARCH64_OPND_QLF_S_D;
1786   else if (fld_size == 3 && fld_q == 1)
1787     info->qualifier = AARCH64_OPND_QLF_S_Q;
1788 
1789   info->za_tile_vector.index.regno = fld_rv + 12;
1790   info->za_tile_vector.v = fld_v;
1791 
1792   switch (info->qualifier)
1793     {
1794     case AARCH64_OPND_QLF_S_B:
1795       info->za_tile_vector.regno = 0;
1796       info->za_tile_vector.index.imm = fld_zan_imm;
1797       break;
1798     case AARCH64_OPND_QLF_S_H:
1799       info->za_tile_vector.regno = fld_zan_imm >> 3;
1800       info->za_tile_vector.index.imm = fld_zan_imm & 0x07;
1801       break;
1802     case AARCH64_OPND_QLF_S_S:
1803       info->za_tile_vector.regno = fld_zan_imm >> 2;
1804       info->za_tile_vector.index.imm = fld_zan_imm & 0x03;
1805       break;
1806     case AARCH64_OPND_QLF_S_D:
1807       info->za_tile_vector.regno = fld_zan_imm >> 1;
1808       info->za_tile_vector.index.imm = fld_zan_imm & 0x01;
1809       break;
1810     case AARCH64_OPND_QLF_S_Q:
1811       info->za_tile_vector.regno = fld_zan_imm;
1812       info->za_tile_vector.index.imm = 0;
1813       break;
1814     default:
1815       return false;
1816     }
1817 
1818   return true;
1819 }
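
/* Worked example for the tile/index split above (illustrative): with an
   .H qualifier the ZAn/imm field is tile:index<2:0>, so a field value of
   0xb selects ZA1.H with vector index 3; with an .S qualifier the same
   value selects ZA2.S with vector index 3.  */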
1820 
1821 /* Decode, for the SME ZERO instruction, the list of up to eight 64-bit element
1822    tile names separated by commas, encoded in the "imm8" field.
1823 
1824    For programmer convenience an assembler must also accept the names of
1825    32-bit, 16-bit and 8-bit element tiles, which are converted into the
1826    corresponding set of 64-bit element tiles.
1827 */
1828 bool
1829 aarch64_ext_sme_za_list (const aarch64_operand *self,
1830                          aarch64_opnd_info *info, aarch64_insn code,
1831                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
1832                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1833 {
1834   int mask = extract_field (self->fields[0], code, 0);
1835   info->imm.value = mask;
1836   return true;
1837 }
1838 
1839 /* Decode ZA array vector select register (Rv field), optional vector and
1840    memory offset (imm4 field).
1841 */
1842 bool
1843 aarch64_ext_sme_za_array (const aarch64_operand *self,
1844                           aarch64_opnd_info *info, aarch64_insn code,
1845                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
1846                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1847 {
1848   int regno = extract_field (self->fields[0], code, 0) + 12;
1849   int imm = extract_field (self->fields[1], code, 0);
1850   info->za_tile_vector.index.regno = regno;
1851   info->za_tile_vector.index.imm = imm;
1852   return true;
1853 }
1854 
1855 bool
1856 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
1857                                aarch64_opnd_info *info, aarch64_insn code,
1858                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
1859                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1860 {
1861   int regno = extract_field (self->fields[0], code, 0);
1862   int imm = extract_field (self->fields[1], code, 0);
1863   info->addr.base_regno = regno;
1864   info->addr.offset.imm = imm;
1865   /* MUL VL operator is always present for this operand.  */
1866   info->shifter.kind = AARCH64_MOD_MUL_VL;
1867   info->shifter.operator_present = (imm != 0);
1868   return true;
1869 }
1870 
1871 /* Decode {SM|ZA} filed for SMSTART and SMSTOP instructions.  */
1872 bool
1873 aarch64_ext_sme_sm_za (const aarch64_operand *self,
1874                        aarch64_opnd_info *info, aarch64_insn code,
1875                        const aarch64_inst *inst ATTRIBUTE_UNUSED,
1876                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1877 {
1878   info->pstatefield = 0x1b;
1879   aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
1880   fld_crm >>= 1;    /* CRm[3:1].  */
1881 
1882   if (fld_crm == 0x1)
1883     info->reg.regno = 's';
1884   else if (fld_crm == 0x2)
1885     info->reg.regno = 'z';
1886   else
1887     return false;
1888 
1889   return true;
1890 }
1891 
1892 bool
1893 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
1894 				     aarch64_opnd_info *info, aarch64_insn code,
1895 				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1896 				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1897 {
1898   aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
1899   aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
1900   aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
1901   aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
1902   aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
1903   int imm;
1904 
1905   info->za_tile_vector.regno = fld_pn;
1906   info->za_tile_vector.index.regno = fld_rm + 12;
1907 
1908   if (fld_tszh == 0x1 && fld_tszl == 0x0)
1909     {
1910       info->qualifier = AARCH64_OPND_QLF_S_D;
1911       imm = fld_i1;
1912     }
1913   else if (fld_tszl == 0x4)
1914     {
1915       info->qualifier = AARCH64_OPND_QLF_S_S;
1916       imm = (fld_i1 << 1) | fld_tszh;
1917     }
1918   else if ((fld_tszl & 0x3) == 0x2)
1919     {
1920       info->qualifier = AARCH64_OPND_QLF_S_H;
1921       imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
1922     }
1923   else if (fld_tszl & 0x1)
1924     {
1925       info->qualifier = AARCH64_OPND_QLF_S_B;
1926       imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
1927     }
1928   else
1929     return false;
1930 
1931   info->za_tile_vector.index.imm = imm;
1932   return true;
1933 }
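
/* Summary of the tszh:tszl checks above (illustrative):
     tszl = xxx1          -> .B, index = i1:tszh:tszl<2:1> (4 bits)
     tszl = xx10          -> .H, index = i1:tszh:tszl<2>   (3 bits)
     tszl = 100           -> .S, index = i1:tszh           (2 bits)
     tszh:tszl = 1:000    -> .D, index = i1                (1 bit).  */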
1934 
1935 /* Decode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
1936    array specifies which field to use for Zn.  MM is encoded in the
1937    concatenation of imm5 and SVE_tszh, with imm5 being the less
1938    significant part.  */
1939 bool
1940 aarch64_ext_sve_index (const aarch64_operand *self,
1941 		       aarch64_opnd_info *info, aarch64_insn code,
1942 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1943 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1944 {
1945   int val;
1946 
1947   info->reglane.regno = extract_field (self->fields[0], code, 0);
1948   val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1949   if ((val & 31) == 0)
1950     return false;
1951   while ((val & 1) == 0)
1952     val /= 2;
1953   info->reglane.index = val / 2;
1954   return true;
1955 }
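
/* Worked example for the triangular encoding above (illustrative): the
   lowest set bit of tszh:imm5 marks the element size (decoded separately
   via the sve_index iclass) and the bits above it give the index, e.g.
     0b0001010 -> index 2 of an .H element (lowest set bit is bit 1),
     0b0000100 -> index 0 of an .S element (lowest set bit is bit 2).  */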
1956 
1957 /* Decode a logical immediate for the MOV alias of SVE DUPM.  */
1958 bool
1959 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1960 			  aarch64_opnd_info *info, const aarch64_insn code,
1961 			  const aarch64_inst *inst,
1962 			  aarch64_operand_error *errors)
1963 {
1964   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1965   return (aarch64_ext_limm (self, info, code, inst, errors)
1966 	  && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1967 }
1968 
1969 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1970    and where MM occupies the most-significant part.  The operand-dependent
1971    value specifies the number of bits in Zn.  */
1972 bool
1973 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1974 			    aarch64_opnd_info *info, aarch64_insn code,
1975 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1976 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1977 {
1978   unsigned int reg_bits = get_operand_specific_data (self);
1979   unsigned int val = extract_all_fields (self, code);
1980   info->reglane.regno = val & ((1 << reg_bits) - 1);
1981   info->reglane.index = val >> reg_bits;
1982   return true;
1983 }
1984 
1985 /* Decode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
1986    to use for Zn.  The opcode-dependent value specifies the number
1987    of registers in the list.  */
1988 bool
1989 aarch64_ext_sve_reglist (const aarch64_operand *self,
1990 			 aarch64_opnd_info *info, aarch64_insn code,
1991 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1992 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1993 {
1994   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1995   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1996   return true;
1997 }
1998 
1999 /* Decode <pattern>{, MUL #<amount>}.  The fields array specifies which
2000    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
2001    field.  */
2002 bool
2003 aarch64_ext_sve_scale (const aarch64_operand *self,
2004 		       aarch64_opnd_info *info, aarch64_insn code,
2005 		       const aarch64_inst *inst, aarch64_operand_error *errors)
2006 {
2007   int val;
2008 
2009   if (!aarch64_ext_imm (self, info, code, inst, errors))
2010     return false;
2011   val = extract_field (FLD_SVE_imm4, code, 0);
2012   info->shifter.kind = AARCH64_MOD_MUL;
2013   info->shifter.amount = val + 1;
2014   info->shifter.operator_present = (val != 0);
2015   info->shifter.amount_present = (val != 0);
2016   return true;
2017 }
2018 
2019 /* Return the top set bit in VALUE, which is expected to be relatively
2020    small.  */
2021 static uint64_t
2022 get_top_bit (uint64_t value)
2023 {
2024   while ((value & -value) != value)
2025     value -= value & -value;
2026   return value;
2027 }
2028 
2029 /* Decode an SVE shift-left immediate.  */
2030 bool
2031 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2032 			aarch64_opnd_info *info, const aarch64_insn code,
2033 			const aarch64_inst *inst, aarch64_operand_error *errors)
2034 {
2035   if (!aarch64_ext_imm (self, info, code, inst, errors)
2036       || info->imm.value == 0)
2037     return false;
2038 
2039   info->imm.value -= get_top_bit (info->imm.value);
2040   return true;
2041 }
2042 
2043 /* Decode an SVE shift-right immediate.  */
2044 bool
2045 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2046 			aarch64_opnd_info *info, const aarch64_insn code,
2047 			const aarch64_inst *inst, aarch64_operand_error *errors)
2048 {
2049   if (!aarch64_ext_imm (self, info, code, inst, errors)
2050       || info->imm.value == 0)
2051     return false;
2052 
2053   info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2054   return true;
2055 }
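
/* Worked example for the two shift decoders above (illustrative): the
   extracted immediate is esize + shift for left shifts and
   2 * esize - shift for right shifts, where esize (the top set bit) is
   the element size in bits.  E.g. a field value of 11 implies esize 8
   (a .B operand) and decodes to #3 for a left shift or #5 for a right
   shift.  */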
2056 
2057 /* Decode X0-X30.  Register 31 is unallocated.  */
2058 bool
2059 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2060 		       const aarch64_insn code,
2061 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
2062 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2063 {
2064   info->reg.regno = extract_field (self->fields[0], code, 0);
2065   return info->reg.regno <= 30;
2066 }
2067 
2068 /* Bitfields that are commonly used to encode certain operands' information
2069    may be partially used as part of the base opcode in some instructions.
2070    For example, the bit 1 of the field 'size' in
2071      FCVTXN <Vb><d>, <Va><n>
2072    is actually part of the base opcode, while only size<0> is available
2073    for encoding the register type.  Another example is the AdvSIMD
2074    instruction ORR (register), in which the field 'size' is also used for
2075    the base opcode, leaving only the field 'Q' available to encode the
2076    vector register arrangement specifier '8B' or '16B'.
2077 
2078    This function tries to deduce the qualifier from the value of partially
2079    constrained field(s).  Given the VALUE of such a field or fields, the
2080    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2081    operand encoding), the function returns the matching qualifier or
2082    AARCH64_OPND_QLF_NIL if nothing matches.
2083 
2084    N.B. CANDIDATES is a group of possible qualifiers that are valid for
2085    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2086    may end with AARCH64_OPND_QLF_NIL.  */
2087 
2088 static enum aarch64_opnd_qualifier
2089 get_qualifier_from_partial_encoding (aarch64_insn value,
2090 				     const enum aarch64_opnd_qualifier* \
2091 				     candidates,
2092 				     aarch64_insn mask)
2093 {
2094   int i;
2095   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2096   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2097     {
2098       aarch64_insn standard_value;
2099       if (candidates[i] == AARCH64_OPND_QLF_NIL)
2100 	break;
2101       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2102       if ((standard_value & mask) == (value & mask))
2103 	return candidates[i];
2104     }
2105   return AARCH64_OPND_QLF_NIL;
2106 }
2107 
2108 /* Given a list of qualifier sequences, return all possible valid qualifiers
2109    for operand IDX in QUALIFIERS.
2110    Assume QUALIFIERS is an array whose length is large enough.  */
2111 
2112 static void
2113 get_operand_possible_qualifiers (int idx,
2114 				 const aarch64_opnd_qualifier_seq_t *list,
2115 				 enum aarch64_opnd_qualifier *qualifiers)
2116 {
2117   int i;
2118   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2119     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2120       break;
2121 }
2122 
2123 /* Decode the size Q field for e.g. SHADD.
2124    We tag one operand with the qualifer according to the code;
2125    whether the qualifier is valid for this opcode or not, it is the
2126    duty of the semantic checking.  */
2127 
2128 static int
2129 decode_sizeq (aarch64_inst *inst)
2130 {
2131   int idx;
2132   enum aarch64_opnd_qualifier qualifier;
2133   aarch64_insn code;
2134   aarch64_insn value, mask;
2135   enum aarch64_field_kind fld_sz;
2136   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2137 
2138   if (inst->opcode->iclass == asisdlse
2139      || inst->opcode->iclass == asisdlsep
2140      || inst->opcode->iclass == asisdlso
2141      || inst->opcode->iclass == asisdlsop)
2142     fld_sz = FLD_vldst_size;
2143   else
2144     fld_sz = FLD_size;
2145 
2146   code = inst->value;
2147   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2148   /* Work out which bits of the Q and size fields are actually
2149      available for operand encoding.  Opcodes like FMAXNM and FMLA have
2150      size[1] unavailable.  */
2151   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2152 
2153   /* The index of the operand to tag with a qualifier, and the qualifier
2154      itself, are deduced from the value of the size and Q fields and the
2155      possible valid qualifier lists.  */
2156   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2157   DEBUG_TRACE ("key idx: %d", idx);
2158 
2159   /* For most related instructions, size:Q is fully available for operand
2160      encoding.  */
2161   if (mask == 0x7)
2162     {
2163       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2164       return 1;
2165     }
2166 
2167   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2168 				   candidates);
2169 #ifdef DEBUG_AARCH64
2170   if (debug_dump)
2171     {
2172       int i;
2173       for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2174 	   && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2175 	DEBUG_TRACE ("qualifier %d: %s", i,
2176 		     aarch64_get_qualifier_name(candidates[i]));
2177       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2178     }
2179 #endif /* DEBUG_AARCH64 */
2180 
2181   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2182 
2183   if (qualifier == AARCH64_OPND_QLF_NIL)
2184     return 0;
2185 
2186   inst->operands[idx].qualifier = qualifier;
2187   return 1;
2188 }
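
/* Worked example for decode_sizeq (illustrative): when all of size:Q is
   free (mask == 0x7) the three bits select the vector qualifier
   directly, e.g. size = 0b10, Q = 1 gives 4S.  For an opcode such as
   ORR (vector, register), size is part of the base opcode, so only the
   Q bit remains to choose between the 8B and 16B candidates.  */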
2189 
2190 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2191      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2192 
2193 static int
2194 decode_asimd_fcvt (aarch64_inst *inst)
2195 {
2196   aarch64_field field = {0, 0};
2197   aarch64_insn value;
2198   enum aarch64_opnd_qualifier qualifier;
2199 
2200   gen_sub_field (FLD_size, 0, 1, &field);
2201   value = extract_field_2 (&field, inst->value, 0);
2202   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2203     : AARCH64_OPND_QLF_V_2D;
2204   switch (inst->opcode->op)
2205     {
2206     case OP_FCVTN:
2207     case OP_FCVTN2:
2208       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2209       inst->operands[1].qualifier = qualifier;
2210       break;
2211     case OP_FCVTL:
2212     case OP_FCVTL2:
2213       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
2214       inst->operands[0].qualifier = qualifier;
2215       break;
2216     default:
2217       return 0;
2218     }
2219 
2220   return 1;
2221 }
2222 
2223 /* Decode size[0], i.e. bit 22, for
2224      e.g. FCVTXN <Vb><d>, <Va><n>.  */
2225 
2226 static int
2227 decode_asisd_fcvtxn (aarch64_inst *inst)
2228 {
2229   aarch64_field field = {0, 0};
2230   gen_sub_field (FLD_size, 0, 1, &field);
2231   if (!extract_field_2 (&field, inst->value, 0))
2232     return 0;
2233   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2234   return 1;
2235 }
2236 
2237 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
2238 static int
2239 decode_fcvt (aarch64_inst *inst)
2240 {
2241   enum aarch64_opnd_qualifier qualifier;
2242   aarch64_insn value;
2243   const aarch64_field field = {15, 2};
2244 
2245   /* opc dstsize */
2246   value = extract_field_2 (&field, inst->value, 0);
2247   switch (value)
2248     {
2249     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2250     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2251     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2252     default: return 0;
2253     }
2254   inst->operands[0].qualifier = qualifier;
2255 
2256   return 1;
2257 }
2258 
2259 /* Do miscellaneous decodings that are not common enough to be driven by
2260    flags.  */
2261 
2262 static int
2263 do_misc_decoding (aarch64_inst *inst)
2264 {
2265   unsigned int value;
2266   switch (inst->opcode->op)
2267     {
2268     case OP_FCVT:
2269       return decode_fcvt (inst);
2270 
2271     case OP_FCVTN:
2272     case OP_FCVTN2:
2273     case OP_FCVTL:
2274     case OP_FCVTL2:
2275       return decode_asimd_fcvt (inst);
2276 
2277     case OP_FCVTXN_S:
2278       return decode_asisd_fcvtxn (inst);
2279 
2280     case OP_MOV_P_P:
2281     case OP_MOVS_P_P:
2282       value = extract_field (FLD_SVE_Pn, inst->value, 0);
2283       return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2284 	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2285 
2286     case OP_MOV_Z_P_Z:
2287       return (extract_field (FLD_SVE_Zd, inst->value, 0)
2288 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2289 
2290     case OP_MOV_Z_V:
2291       /* Index must be zero.  */
2292       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2293       return value > 0 && value <= 16 && value == (value & -value);
2294 
2295     case OP_MOV_Z_Z:
2296       return (extract_field (FLD_SVE_Zn, inst->value, 0)
2297 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2298 
2299     case OP_MOV_Z_Zi:
2300       /* Index must be nonzero.  */
2301       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2302       return value > 0 && value != (value & -value);
2303 
2304     case OP_MOVM_P_P_P:
2305       return (extract_field (FLD_SVE_Pd, inst->value, 0)
2306 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2307 
2308     case OP_MOVZS_P_P_P:
2309     case OP_MOVZ_P_P_P:
2310       return (extract_field (FLD_SVE_Pn, inst->value, 0)
2311 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2312 
2313     case OP_NOTS_P_P_P_Z:
2314     case OP_NOT_P_P_P_Z:
2315       return (extract_field (FLD_SVE_Pm, inst->value, 0)
2316 	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2317 
2318     default:
2319       return 0;
2320     }
2321 }
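
/* Worked example for the OP_MOV_Z_V/OP_MOV_Z_Zi checks above
   (illustrative): a tszh:imm5 value of 0b00100 is a power of two, i.e.
   index 0, so the MOV <Zd>.S, <V><n> form is acceptable; 0b01100 encodes
   index 1, so only the indexed MOV <Zd>.S, <Zn>.S[1] form applies.  */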
2322 
2323 /* Opcodes that have fields shared by multiple operands are usually flagged
2324    accordingly.  In this function, we detect such flags, decode the related
2325    field(s) and store the information in one of the related operands.  That
2326    operand is not an arbitrary one, but one of the operands that can
2327    accommodate all the information that has been decoded.  */
2328 
2329 static int
2330 do_special_decoding (aarch64_inst *inst)
2331 {
2332   int idx;
2333   aarch64_insn value;
2334   /* Condition for truly conditional executed instructions, e.g. b.cond.  */
2335   if (inst->opcode->flags & F_COND)
2336     {
2337       value = extract_field (FLD_cond2, inst->value, 0);
2338       inst->cond = get_cond_from_value (value);
2339     }
2340   /* 'sf' field.  */
2341   if (inst->opcode->flags & F_SF)
2342     {
2343       idx = select_operand_for_sf_field_coding (inst->opcode);
2344       value = extract_field (FLD_sf, inst->value, 0);
2345       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2346       if ((inst->opcode->flags & F_N)
2347 	  && extract_field (FLD_N, inst->value, 0) != value)
2348 	return 0;
2349     }
2350   /* 'lse_sz' field.  */
2351   if (inst->opcode->flags & F_LSE_SZ)
2352     {
2353       idx = select_operand_for_sf_field_coding (inst->opcode);
2354       value = extract_field (FLD_lse_sz, inst->value, 0);
2355       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2356     }
2357   /* size:Q fields.  */
2358   if (inst->opcode->flags & F_SIZEQ)
2359     return decode_sizeq (inst);
2360 
2361   if (inst->opcode->flags & F_FPTYPE)
2362     {
2363       idx = select_operand_for_fptype_field_coding (inst->opcode);
2364       value = extract_field (FLD_type, inst->value, 0);
2365       switch (value)
2366 	{
2367 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2368 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2369 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2370 	default: return 0;
2371 	}
2372     }
2373 
2374   if (inst->opcode->flags & F_SSIZE)
2375     {
2376       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2377 	 of the base opcode.  */
2378       aarch64_insn mask;
2379       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2380       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2381       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2382       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2383       /* For most related instructions, the 'size' field is fully available for
2384 	 operand encoding.  */
2385       if (mask == 0x3)
2386 	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2387       else
2388 	{
2389 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2390 					   candidates);
2391 	  inst->operands[idx].qualifier
2392 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
2393 	}
2394     }
2395 
2396   if (inst->opcode->flags & F_T)
2397     {
2398       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
2399       int num = 0;
2400       unsigned val, Q;
2401       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2402 	      == AARCH64_OPND_CLASS_SIMD_REG);
2403       /* imm5<3:0>	q	<t>
2404 	 0000		x	reserved
2405 	 xxx1		0	8b
2406 	 xxx1		1	16b
2407 	 xx10		0	4h
2408 	 xx10		1	8h
2409 	 x100		0	2s
2410 	 x100		1	4s
2411 	 1000		0	reserved
2412 	 1000		1	2d  */
2413       val = extract_field (FLD_imm5, inst->value, 0);
2414       while ((val & 0x1) == 0 && ++num <= 3)
2415 	val >>= 1;
2416       if (num > 3)
2417 	return 0;
2418       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2419       inst->operands[0].qualifier =
2420 	get_vreg_qualifier_from_value ((num << 1) | Q);
2421     }
2422 
2423   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2424     {
2425       /* Use Rt to encode in the case of e.g.
2426 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
2427       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2428       if (idx == -1)
2429 	{
2430 	  /* Otherwise use the result operand, which has to be an integer
2431 	     register.  */
2432 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
2433 		  == AARCH64_OPND_CLASS_INT_REG);
2434 	  idx = 0;
2435 	}
2436       assert (idx == 0 || idx == 1);
2437       value = extract_field (FLD_Q, inst->value, 0);
2438       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2439     }
2440 
2441   if (inst->opcode->flags & F_LDS_SIZE)
2442     {
2443       aarch64_field field = {0, 0};
2444       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2445 	      == AARCH64_OPND_CLASS_INT_REG);
2446       gen_sub_field (FLD_opc, 0, 1, &field);
2447       value = extract_field_2 (&field, inst->value, 0);
2448       inst->operands[0].qualifier
2449 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2450     }
2451 
2452   /* Miscellaneous decoding; done as the last step.  */
2453   if (inst->opcode->flags & F_MISC)
2454     return do_misc_decoding (inst);
2455 
2456   return 1;
2457 }
2458 
2459 /* Converters converting a real opcode instruction to its alias form.  */
2460 
2461 /* ROR <Wd>, <Ws>, #<shift>
2462      is equivalent to:
2463    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
2464 static int
2465 convert_extr_to_ror (aarch64_inst *inst)
2466 {
2467   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2468     {
2469       copy_operand_info (inst, 2, 3);
2470       inst->operands[3].type = AARCH64_OPND_NIL;
2471       return 1;
2472     }
2473   return 0;
2474 }
2475 
2476 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2477      is equivalent to:
2478    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2479 static int
2480 convert_shll_to_xtl (aarch64_inst *inst)
2481 {
2482   if (inst->operands[2].imm.value == 0)
2483     {
2484       inst->operands[2].type = AARCH64_OPND_NIL;
2485       return 1;
2486     }
2487   return 0;
2488 }
2489 
2490 /* Convert
2491      UBFM <Xd>, <Xn>, #<shift>, #63.
2492    to
2493      LSR <Xd>, <Xn>, #<shift>.  */
2494 static int
2495 convert_bfm_to_sr (aarch64_inst *inst)
2496 {
2497   int64_t imms, val;
2498 
2499   imms = inst->operands[3].imm.value;
2500   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2501   if (imms == val)
2502     {
2503       inst->operands[3].type = AARCH64_OPND_NIL;
2504       return 1;
2505     }
2506 
2507   return 0;
2508 }
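
/* Worked example (illustrative): UBFM X0, X1, #4, #63 satisfies
   imms == 63, so operand 3 is dropped and the instruction is shown as
   LSR X0, X1, #4.  */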
2509 
2510 /* Convert MOV to ORR.  */
2511 static int
2512 convert_orr_to_mov (aarch64_inst *inst)
2513 {
2514   /* MOV <Vd>.<T>, <Vn>.<T>
2515      is equivalent to:
2516      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
2517   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2518     {
2519       inst->operands[2].type = AARCH64_OPND_NIL;
2520       return 1;
2521     }
2522   return 0;
2523 }
2524 
2525 /* When <imms> >= <immr>, the instruction written:
2526      SBFX <Xd>, <Xn>, #<lsb>, #<width>
2527    is equivalent to:
2528      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
2529 
2530 static int
2531 convert_bfm_to_bfx (aarch64_inst *inst)
2532 {
2533   int64_t immr, imms;
2534 
2535   immr = inst->operands[2].imm.value;
2536   imms = inst->operands[3].imm.value;
2537   if (imms >= immr)
2538     {
2539       int64_t lsb = immr;
2540       inst->operands[2].imm.value = lsb;
2541       inst->operands[3].imm.value = imms + 1 - lsb;
2542       /* The two opcodes have different qualifiers for
2543 	 the immediate operands; reset to help the checking.  */
2544       reset_operand_qualifier (inst, 2);
2545       reset_operand_qualifier (inst, 3);
2546       return 1;
2547     }
2548 
2549   return 0;
2550 }
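
/* Worked example (illustrative): SBFM W0, W1, #2, #5 has imms >= immr,
   so it is shown as SBFX W0, W1, #2, #4 (lsb = 2, width = 5 + 1 - 2).  */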
2551 
2552 /* When <imms> < <immr>, the instruction written:
2553      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2554    is equivalent to:
2555      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
2556 
2557 static int
2558 convert_bfm_to_bfi (aarch64_inst *inst)
2559 {
2560   int64_t immr, imms, val;
2561 
2562   immr = inst->operands[2].imm.value;
2563   imms = inst->operands[3].imm.value;
2564   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2565   if (imms < immr)
2566     {
2567       inst->operands[2].imm.value = (val - immr) & (val - 1);
2568       inst->operands[3].imm.value = imms + 1;
2569       /* The two opcodes have different qualifiers for
2570 	 the immediate operands; reset to help the checking.  */
2571       reset_operand_qualifier (inst, 2);
2572       reset_operand_qualifier (inst, 3);
2573       return 1;
2574     }
2575 
2576   return 0;
2577 }
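
/* Worked example (illustrative): SBFM X0, X1, #60, #3 has imms < immr,
   so it is shown as SBFIZ X0, X1, #4, #4
   (lsb = (64 - 60) & 0x3f, width = 3 + 1).  */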
2578 
2579 /* The instruction written:
2580      BFC <Xd>, #<lsb>, #<width>
2581    is equivalent to:
2582      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2583 
2584 static int
2585 convert_bfm_to_bfc (aarch64_inst *inst)
2586 {
2587   int64_t immr, imms, val;
2588 
2589   /* Should have been assured by the base opcode value.  */
2590   assert (inst->operands[1].reg.regno == 0x1f);
2591 
2592   immr = inst->operands[2].imm.value;
2593   imms = inst->operands[3].imm.value;
2594   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2595   if (imms < immr)
2596     {
2597       /* Drop XZR from the second operand.  */
2598       copy_operand_info (inst, 1, 2);
2599       copy_operand_info (inst, 2, 3);
2600       inst->operands[3].type = AARCH64_OPND_NIL;
2601 
2602       /* Recalculate the immediates.  */
2603       inst->operands[1].imm.value = (val - immr) & (val - 1);
2604       inst->operands[2].imm.value = imms + 1;
2605 
2606       /* The two opcodes have different qualifiers for the operands; reset to
2607 	 help the checking.  */
2608       reset_operand_qualifier (inst, 1);
2609       reset_operand_qualifier (inst, 2);
2610       reset_operand_qualifier (inst, 3);
2611 
2612       return 1;
2613     }
2614 
2615   return 0;
2616 }
2617 
2618 /* The instruction written:
2619      LSL <Xd>, <Xn>, #<shift>
2620    is equivalent to:
2621      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
2622 
2623 static int
2624 convert_ubfm_to_lsl (aarch64_inst *inst)
2625 {
2626   int64_t immr = inst->operands[2].imm.value;
2627   int64_t imms = inst->operands[3].imm.value;
2628   int64_t val
2629     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2630 
2631   if ((immr == 0 && imms == val) || immr == imms + 1)
2632     {
2633       inst->operands[3].type = AARCH64_OPND_NIL;
2634       inst->operands[2].imm.value = val - imms;
2635       return 1;
2636     }
2637 
2638   return 0;
2639 }
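
/* Worked example (illustrative): UBFM X0, X1, #60, #59 satisfies
   immr == imms + 1, so it is shown as LSL X0, X1, #4 (63 - 59).  */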
2640 
2641 /* CINC <Wd>, <Wn>, <cond>
2642      is equivalent to:
2643    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2644      where <cond> is not AL or NV.  */
2645 
2646 static int
2647 convert_from_csel (aarch64_inst *inst)
2648 {
2649   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2650       && (inst->operands[3].cond->value & 0xe) != 0xe)
2651     {
2652       copy_operand_info (inst, 2, 3);
2653       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2654       inst->operands[3].type = AARCH64_OPND_NIL;
2655       return 1;
2656     }
2657   return 0;
2658 }
2659 
2660 /* CSET <Wd>, <cond>
2661      is equivalent to:
2662    CSINC <Wd>, WZR, WZR, invert(<cond>)
2663      where <cond> is not AL or NV.  */
2664 
2665 static int
2666 convert_csinc_to_cset (aarch64_inst *inst)
2667 {
2668   if (inst->operands[1].reg.regno == 0x1f
2669       && inst->operands[2].reg.regno == 0x1f
2670       && (inst->operands[3].cond->value & 0xe) != 0xe)
2671     {
2672       copy_operand_info (inst, 1, 3);
2673       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2674       inst->operands[3].type = AARCH64_OPND_NIL;
2675       inst->operands[2].type = AARCH64_OPND_NIL;
2676       return 1;
2677     }
2678   return 0;
2679 }
2680 
2681 /* MOV <Wd>, #<imm>
2682      is equivalent to:
2683    MOVZ <Wd>, #<imm16>, LSL #<shift>.
2684 
2685    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2686    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2687    or where a MOVN has an immediate that could be encoded by MOVZ, or where
2688    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2689    machine-instruction mnemonic must be used.  */
2690 
2691 static int
2692 convert_movewide_to_mov (aarch64_inst *inst)
2693 {
2694   uint64_t value = inst->operands[1].imm.value;
2695   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
2696   if (value == 0 && inst->operands[1].shifter.amount != 0)
2697     return 0;
2698   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2699   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2700   value <<= inst->operands[1].shifter.amount;
2701   /* As an alias converter, note that the INST->OPCODE
2702      is the opcode of the real instruction.  */
2703   if (inst->opcode->op == OP_MOVN)
2704     {
2705       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2706       value = ~value;
2707       /* A MOVN has an immediate that could be encoded by MOVZ.  */
2708       if (aarch64_wide_constant_p (value, is32, NULL))
2709 	return 0;
2710     }
2711   inst->operands[1].imm.value = value;
2712   inst->operands[1].shifter.amount = 0;
2713   return 1;
2714 }
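
/* Worked example (illustrative): MOVZ X0, #0x1234, LSL #16 is shown as
   MOV X0, #0x12340000, whereas MOVZ X0, #0, LSL #16 keeps the MOVZ
   mnemonic, because a zero immediate with a non-zero shift is rejected
   above.  */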
2715 
2716 /* MOV <Wd>, #<imm>
2717      is equivalent to:
2718    ORR <Wd>, WZR, #<imm>.
2719 
2720    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2721    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2722    or where a MOVN has an immediate that could be encoded by MOVZ, or where
2723    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2724    machine-instruction mnemonic must be used.  */
2725 
2726 static int
2727 convert_movebitmask_to_mov (aarch64_inst *inst)
2728 {
2729   int is32;
2730   uint64_t value;
2731 
2732   /* Should have been assured by the base opcode value.  */
2733   assert (inst->operands[1].reg.regno == 0x1f);
2734   copy_operand_info (inst, 1, 2);
2735   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2736   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2737   value = inst->operands[1].imm.value;
2738   /* ORR has an immediate that could be generated by a MOVZ or MOVN
2739      instruction.  */
2740   if (inst->operands[0].reg.regno != 0x1f
2741       && (aarch64_wide_constant_p (value, is32, NULL)
2742 	  || aarch64_wide_constant_p (~value, is32, NULL)))
2743     return 0;
2744 
2745   inst->operands[2].type = AARCH64_OPND_NIL;
2746   return 1;
2747 }
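
/* Worked example (illustrative): ORR W0, WZR, #0xf0f0f0f0 would be shown
   as MOV W0, #0xf0f0f0f0, since that bitmask immediate cannot be
   produced by a single MOVZ or MOVN; an ORR whose immediate is
   MOVZ/MOVN-encodable keeps the ORR mnemonic instead.  */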
2748 
2749 /* Some alias opcodes are disassembled by being converted from their real form.
2750    N.B. INST->OPCODE is the real opcode rather than the alias.  */
2751 
2752 static int
2753 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2754 {
2755   switch (alias->op)
2756     {
2757     case OP_ASR_IMM:
2758     case OP_LSR_IMM:
2759       return convert_bfm_to_sr (inst);
2760     case OP_LSL_IMM:
2761       return convert_ubfm_to_lsl (inst);
2762     case OP_CINC:
2763     case OP_CINV:
2764     case OP_CNEG:
2765       return convert_from_csel (inst);
2766     case OP_CSET:
2767     case OP_CSETM:
2768       return convert_csinc_to_cset (inst);
2769     case OP_UBFX:
2770     case OP_BFXIL:
2771     case OP_SBFX:
2772       return convert_bfm_to_bfx (inst);
2773     case OP_SBFIZ:
2774     case OP_BFI:
2775     case OP_UBFIZ:
2776       return convert_bfm_to_bfi (inst);
2777     case OP_BFC:
2778       return convert_bfm_to_bfc (inst);
2779     case OP_MOV_V:
2780       return convert_orr_to_mov (inst);
2781     case OP_MOV_IMM_WIDE:
2782     case OP_MOV_IMM_WIDEN:
2783       return convert_movewide_to_mov (inst);
2784     case OP_MOV_IMM_LOG:
2785       return convert_movebitmask_to_mov (inst);
2786     case OP_ROR_IMM:
2787       return convert_extr_to_ror (inst);
2788     case OP_SXTL:
2789     case OP_SXTL2:
2790     case OP_UXTL:
2791     case OP_UXTL2:
2792       return convert_shll_to_xtl (inst);
2793     default:
2794       return 0;
2795     }
2796 }
2797 
2798 static bool
2799 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2800 		       aarch64_inst *, int, aarch64_operand_error *errors);
2801 
2802 /* Given the instruction information in *INST, check if the instruction has
2803    any alias form that can be used to represent *INST.  If the answer is yes,
2804    update *INST to be in the form of the determined alias.  */
2805 
2806 /* In the opcode description table, the following flags are used in opcode
2807    entries to help establish the relations between the real and alias opcodes:
2808 
2809 	F_ALIAS:	opcode is an alias
2810 	F_HAS_ALIAS:	opcode has alias(es)
2811 	F_P1
2812 	F_P2
2813 	F_P3:		Disassembly preference priority 1-3 (the larger the number,
2814 			the higher the priority).  If nothing is specified, the
2815 			priority defaults to 0, i.e. the lowest priority.
2816 
2817    Although the relation between the machine and the alias instructions is not
2818    explicitly described, it can be easily determined from the base opcode
2819    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2820    description entries:
2821 
2822    The mask of an alias opcode must be equal to or a super-set (i.e. more
2823    constrained) of that of the aliased opcode; so is the base opcode value.
2824 
2825    if (opcode_has_alias (real) && alias_opcode_p (opcode)
2826        && (opcode->mask & real->mask) == real->mask
2827        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2828    then OPCODE is an alias of, and only of, the REAL instruction
2829 
2830    The alias relationship is forced flat-structured to keep related algorithm
2831    simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2832 
2833    During disassembly, the decoding decision tree (in
2834    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2835    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2836    not specified), the disassembler will check whether any alias
2837    instruction exists for this real instruction.  If there is, the disassembler
2838    will try to disassemble the 32-bit binary again using the alias's rule, or
2839    try to convert the IR to the form of the alias.  In the case of multiple
2840    aliases, the aliases are tried one by one from the highest priority
2841    (currently the flag F_P3) to the lowest priority (no priority flag), and the
2842    first one that succeeds is adopted.
2843 
2844    You may ask why there is a need to convert the IR from one form to
2845    another when handling certain aliases.  On the one hand, it avoids
2846    adding more operand code to handle unusual encoding/decoding; on the other
2847    hand, during disassembly, the conversion is an effective way to
2848    check the condition of an alias (as an alias may be adopted only if certain
2849    conditions are met).
2850 
2851    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2852    aarch64_opcode_table and generated aarch64_find_alias_opcode and
2853    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
2854 
2855 static void
2856 determine_disassembling_preference (struct aarch64_inst *inst,
2857 				    aarch64_operand_error *errors)
2858 {
2859   const aarch64_opcode *opcode;
2860   const aarch64_opcode *alias;
2861 
2862   opcode = inst->opcode;
2863 
2864   /* This opcode does not have an alias, so use itself.  */
2865   if (!opcode_has_alias (opcode))
2866     return;
2867 
2868   alias = aarch64_find_alias_opcode (opcode);
2869   assert (alias);
2870 
2871 #ifdef DEBUG_AARCH64
2872   if (debug_dump)
2873     {
2874       const aarch64_opcode *tmp = alias;
2875       printf ("####   LIST    ordered: ");
2876       while (tmp)
2877 	{
2878 	  printf ("%s, ", tmp->name);
2879 	  tmp = aarch64_find_next_alias_opcode (tmp);
2880 	}
2881       printf ("\n");
2882     }
2883 #endif /* DEBUG_AARCH64 */
2884 
2885   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2886     {
2887       DEBUG_TRACE ("try %s", alias->name);
2888       assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2889 
2890       /* An alias can be a pseudo opcode which will never be used in the
2891 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2892 	 aliasing AND.  */
2893       if (pseudo_opcode_p (alias))
2894 	{
2895 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
2896 	  continue;
2897 	}
2898 
2899       if ((inst->value & alias->mask) != alias->opcode)
2900 	{
2901 	  DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2902 	  continue;
2903 	}
2904 
2905       if (!AARCH64_CPU_HAS_FEATURE (arch_variant, *alias->avariant))
2906 	{
2907 	  DEBUG_TRACE ("skip %s: we're missing features", alias->name);
2908 	  continue;
2909 	}
2910 
2911       /* No need to do any complicated transformation on operands, if the alias
2912 	 opcode does not have any operand.  */
2913       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2914 	{
2915 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2916 	  aarch64_replace_opcode (inst, alias);
2917 	  return;
2918 	}
2919       if (alias->flags & F_CONV)
2920 	{
2921 	  aarch64_inst copy;
2922 	  memcpy (&copy, inst, sizeof (aarch64_inst));
2923 	  /* ALIAS is the preference as long as the instruction can be
2924 	     successfully converted to the form of ALIAS.  */
2925 	  if (convert_to_alias (&copy, alias) == 1)
2926 	    {
2927 	      aarch64_replace_opcode (&copy, alias);
2928 	      if (aarch64_match_operands_constraint (&copy, NULL) != 1)
2929 		{
2930 		  DEBUG_TRACE ("FAILED with alias %s ", alias->name);
2931 		}
2932 	      else
2933 		{
2934 		  DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2935 		  memcpy (inst, &copy, sizeof (aarch64_inst));
2936 		}
2937 	      return;
2938 	    }
2939 	}
2940       else
2941 	{
2942 	  /* Directly decode the alias opcode.  */
2943 	  aarch64_inst temp;
2944 	  memset (&temp, '\0', sizeof (aarch64_inst));
2945 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2946 	    {
2947 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2948 	      memcpy (inst, &temp, sizeof (aarch64_inst));
2949 	      return;
2950 	    }
2951 	}
2952     }
2953 }
2954 
2955 /* Some instructions (including all SVE ones) use the instruction class
2956    to describe how a qualifiers_list index is represented in the instruction
2957    encoding.  If INST is such an instruction, decode the appropriate fields
2958    and fill in the operand qualifiers accordingly.  Return true if no
2959    problems are found.  */
2960 
2961 static bool
2962 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2963 {
2964   int i, variant;
2965 
2966   variant = 0;
2967   switch (inst->opcode->iclass)
2968     {
2969     case sve_cpy:
2970       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2971       break;
2972 
2973     case sve_index:
2974       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2975       if ((i & 31) == 0)
2976 	return false;
2977       while ((i & 1) == 0)
2978 	{
2979 	  i >>= 1;
2980 	  variant += 1;
2981 	}
2982       break;
2983 
2984     case sve_limm:
2985       /* Pick the smallest applicable element size.  */
2986       if ((inst->value & 0x20600) == 0x600)
2987 	variant = 0;
2988       else if ((inst->value & 0x20400) == 0x400)
2989 	variant = 1;
2990       else if ((inst->value & 0x20000) == 0)
2991 	variant = 2;
2992       else
2993 	variant = 3;
2994       break;
2995 
2996     case sve_misc:
2997       /* sve_misc instructions have only a single variant.  */
2998       break;
2999 
3000     case sve_movprfx:
3001       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
3002       break;
3003 
3004     case sve_pred_zm:
3005       variant = extract_field (FLD_SVE_M_4, inst->value, 0);
3006       break;
3007 
3008     case sve_shift_pred:
3009       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
3010     sve_shift:
3011       if (i == 0)
3012 	return false;
3013       while (i != 1)
3014 	{
3015 	  i >>= 1;
3016 	  variant += 1;
3017 	}
3018       break;
3019 
3020     case sve_shift_unpred:
3021       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3022       goto sve_shift;
3023 
3024     case sve_size_bhs:
3025       variant = extract_field (FLD_size, inst->value, 0);
3026       if (variant >= 3)
3027 	return false;
3028       break;
3029 
3030     case sve_size_bhsd:
3031       variant = extract_field (FLD_size, inst->value, 0);
3032       break;
3033 
3034     case sve_size_hsd:
3035       i = extract_field (FLD_size, inst->value, 0);
3036       if (i < 1)
3037 	return false;
3038       variant = i - 1;
3039       break;
3040 
3041     case sve_size_bh:
3042     case sve_size_sd:
3043       variant = extract_field (FLD_SVE_sz, inst->value, 0);
3044       break;
3045 
3046     case sve_size_sd2:
3047       variant = extract_field (FLD_SVE_sz2, inst->value, 0);
3048       break;
3049 
3050     case sve_size_hsd2:
3051       i = extract_field (FLD_SVE_size, inst->value, 0);
3052       if (i < 1)
3053 	return false;
3054       variant = i - 1;
3055       break;
3056 
3057     case sve_size_13:
3058       /* Ignore low bit of this field since that is set in the opcode for
3059 	 instructions of this iclass.  */
3060       i = (extract_field (FLD_size, inst->value, 0) & 2);
3061       variant = (i >> 1);
3062       break;
3063 
3064     case sve_shift_tsz_bhsd:
3065       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3066       if (i == 0)
3067 	return false;
3068       while (i != 1)
3069 	{
3070 	  i >>= 1;
3071 	  variant += 1;
3072 	}
3073       break;
3074 
3075     case sve_size_tsz_bhs:
3076       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3077       if (i == 0)
3078 	return false;
3079       while (i != 1)
3080 	{
3081 	  if (i & 1)
3082 	    return false;
3083 	  i >>= 1;
3084 	  variant += 1;
3085 	}
3086       break;
3087 
3088     case sve_shift_tsz_hsd:
3089       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3090       if (i == 0)
3091 	return false;
3092       while (i != 1)
3093 	{
3094 	  i >>= 1;
3095 	  variant += 1;
3096 	}
3097       break;
3098 
3099     default:
3100       /* No mapping between instruction class and qualifiers.  */
3101       return true;
3102     }
3103 
3104   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3105     inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
3106   return true;
3107 }
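
/* Worked example for the iclass-based selection above (illustrative):
   for sve_size_hsd a size field of 0 is rejected, while values 1, 2 and
   3 select rows 0, 1 and 2 of the opcode's qualifiers_list, which for
   such opcodes typically correspond to the .H, .S and .D forms.  */
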
3108 /* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
3109    fails, which meanes that CODE is not an instruction of OPCODE; otherwise
3110    return 1.
3111 
3112    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3113    determined and used to disassemble CODE; this is done just before the
3114    return.  */
3115 
3116 static bool
3117 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
3118 		       aarch64_inst *inst, int noaliases_p,
3119 		       aarch64_operand_error *errors)
3120 {
3121   int i;
3122 
3123   DEBUG_TRACE ("enter with %s", opcode->name);
3124 
3125   assert (opcode && inst);
3126 
3127   /* Clear inst.  */
3128   memset (inst, '\0', sizeof (aarch64_inst));
3129 
3130   /* Check the base opcode.  */
3131   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
3132     {
3133       DEBUG_TRACE ("base opcode match FAIL");
3134       goto decode_fail;
3135     }
3136 
3137   inst->opcode = opcode;
3138   inst->value = code;
3139 
3140   /* Assign operand codes and indexes.  */
3141   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3142     {
3143       if (opcode->operands[i] == AARCH64_OPND_NIL)
3144 	break;
3145       inst->operands[i].type = opcode->operands[i];
3146       inst->operands[i].idx = i;
3147     }
3148 
3149   /* Call the opcode decoder indicated by flags.  */
3150   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
3151     {
3152       DEBUG_TRACE ("opcode flag-based decoder FAIL");
3153       goto decode_fail;
3154     }
3155 
3156   /* Possibly use the instruction class to determine the correct
3157      qualifier.  */
3158   if (!aarch64_decode_variant_using_iclass (inst))
3159     {
3160       DEBUG_TRACE ("iclass-based decoder FAIL");
3161       goto decode_fail;
3162     }
3163 
3164   /* Call operand decoders.  */
3165   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3166     {
3167       const aarch64_operand *opnd;
3168       enum aarch64_opnd type;
3169 
3170       type = opcode->operands[i];
3171       if (type == AARCH64_OPND_NIL)
3172 	break;
3173       opnd = &aarch64_operands[type];
3174       if (operand_has_extractor (opnd)
3175 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
3176 					 errors)))
3177 	{
3178 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
3179 	  goto decode_fail;
3180 	}
3181     }
3182 
3183   /* If the opcode has a verifier, then check it now.  */
3184   if (opcode->verifier
3185       && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
3186     {
3187       DEBUG_TRACE ("operand verifier FAIL");
3188       goto decode_fail;
3189     }
3190 
3191   /* Match the qualifiers.  */
3192   if (aarch64_match_operands_constraint (inst, NULL) == 1)
3193     {
3194       /* Arriving here, the CODE has been determined as a valid instruction
3195 	 of OPCODE and *INST has been filled with information of this OPCODE
3196 	 instruction.  Before the return, check if the instruction has any
3197 	 alias and should be disassembled in the form of its alias instead.
3198 	 If the answer is yes, *INST will be updated.  */
3199       if (!noaliases_p)
3200 	determine_disassembling_preference (inst, errors);
3201       DEBUG_TRACE ("SUCCESS");
3202       return true;
3203     }
3204   else
3205     {
3206       DEBUG_TRACE ("constraint matching FAIL");
3207     }
3208 
3209  decode_fail:
3210   return false;
3211 }
3212 
3213 /* This does some user-friendly fix-up to *INST.  It currently focuses on
3214    adjusting qualifiers so that the printed instruction can be
3215    recognized/understood more easily.  */
3216 
3217 static void
3218 user_friendly_fixup (aarch64_inst *inst)
3219 {
3220   switch (inst->opcode->iclass)
3221     {
3222     case testbranch:
3223       /* TBNZ Xn|Wn, #uimm6, label
3224 	 Test and Branch Not Zero: conditionally jumps to label if bit number
3225 	 uimm6 in register Xn is not zero.  The bit number implies the width of
3226 	 the register, which may be written and should be disassembled as Wn if
3227 	 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
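     	 For example, an encoding that tests bit 3 is printed as
     	 "tbnz w1, #3, <addr>" rather than with an X register.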
3228 	 */
3229       if (inst->operands[1].imm.value < 32)
3230 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3231       break;
3232     default: break;
3233     }
3234 }
3235 
3236 /* Decode INSN and fill *INST with the instruction information.  An alias
3237    opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on
3238    success.  */
3239 
3240 enum err_type
3241 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3242 		     bool noaliases_p,
3243 		     aarch64_operand_error *errors)
3244 {
3245   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3246 
3247 #ifdef DEBUG_AARCH64
3248   if (debug_dump)
3249     {
3250       const aarch64_opcode *tmp = opcode;
3251       printf ("\n");
3252       DEBUG_TRACE ("opcode lookup:");
3253       while (tmp != NULL)
3254 	{
3255 	  aarch64_verbose ("  %s", tmp->name);
3256 	  tmp = aarch64_find_next_opcode (tmp);
3257 	}
3258     }
3259 #endif /* DEBUG_AARCH64 */
3260 
3261   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3262      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3263      opcode field and value, apart from the difference that one of them has an
3264      extra field as part of the opcode, but such a field is used for operand
3265      encoding in other opcode(s) ('immh' in the case of the example).  */
3266   while (opcode != NULL)
3267     {
3268       /* But only one opcode can be decoded successfully, as the
3269 	 decoding routine checks the constraints carefully.  */
3270       if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3271 	return ERR_OK;
3272       opcode = aarch64_find_next_opcode (opcode);
3273     }
3274 
3275   return ERR_UND;
3276 }
3277 
3278 /* Print operands.  */
3279 
3280 static void
3281 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3282 		const aarch64_opnd_info *opnds, struct disassemble_info *info,
3283 		bool *has_notes)
3284 {
3285   char *notes = NULL;
3286   int i, pcrel_p, num_printed;
3287   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3288     {
3289       char str[128];
3290       char cmt[128];
3291 
3292       /* We rely mainly on the opcode's operand info, but we also look into
3293 	 inst->operands so that an optional operand can be
3294 	 disassembled.
3295 	 The two operand codes should be the same in all cases, apart from
3296 	 when the operand is optional.  */
3297       if (opcode->operands[i] == AARCH64_OPND_NIL
3298 	  || opnds[i].type == AARCH64_OPND_NIL)
3299 	break;
3300 
3301       /* Generate the operand string in STR.  */
3302       aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3303 			     &info->target, &notes, cmt, sizeof (cmt),
3304 			     arch_variant);
3305 
3306       /* Print the delimiter (taking account of omitted operand(s)).  */
3307       if (str[0] != '\0')
3308 	(*info->fprintf_func) (info->stream, "%s",
3309 			       num_printed++ == 0 ? "\t" : ", ");
3310 
3311       /* Print the operand.  */
3312       if (pcrel_p)
3313 	(*info->print_address_func) (info->target, info);
3314       else
3315 	{
3316 	  (*info->fprintf_func) (info->stream, "%s", str);
3317 
3318 	  /* Print the comment.  This works because only the last operand
3319 	     ever adds a comment.  If that ever changes then we'll need to
3320 	     be smarter here.  */
3321 	  if (cmt[0] != '\0')
3322 	    (*info->fprintf_func) (info->stream, "\t// %s", cmt);
3323 	}
3324     }
3325 
3326   if (notes && !no_notes)
3327     {
3328       *has_notes = true;
3329       (*info->fprintf_func) (info->stream, "  // note: %s", notes);
3330     }
3331 }
3332 
3333 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed.  */
3334 
3335 static void
3336 remove_dot_suffix (char *name, const aarch64_inst *inst)
3337 {
3338   char *ptr;
3339   size_t len;
3340 
3341   ptr = strchr (inst->opcode->name, '.');
3342   assert (ptr && inst->cond);
3343   len = ptr - inst->opcode->name;
3344   assert (len < 8);
3345   strncpy (name, inst->opcode->name, len);
3346   name[len] = '\0';
3347 }
3348 
3349 /* Print the instruction mnemonic name.  */
3350 
3351 static void
3352 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3353 {
3354   if (inst->opcode->flags & F_COND)
3355     {
3356       /* For instructions that are truly conditionally executed, e.g. b.cond,
3357 	 prepare the full mnemonic name with the corresponding condition
3358 	 suffix.  */
3359       char name[8];
3360 
3361       remove_dot_suffix (name, inst);
3362       (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3363     }
3364   else
3365     (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3366 }
3367 
3368 /* Decide whether we need to print a comment after the operands of
3369    instruction INST.  */
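     /* For a condition that has more than one name, the alternative spellings
        are appended after the operands as a trailing comment, e.g.
        "  // <mnemonic>.<alternative-name>".  */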
3370 
3371 static void
3372 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3373 {
3374   if (inst->opcode->flags & F_COND)
3375     {
3376       char name[8];
3377       unsigned int i, num_conds;
3378 
3379       remove_dot_suffix (name, inst);
3380       num_conds = ARRAY_SIZE (inst->cond->names);
3381       for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3382 	(*info->fprintf_func) (info->stream, "%s %s.%s",
3383 			       i == 1 ? "  //" : ",",
3384 			       name, inst->cond->names[i]);
3385     }
3386 }
3387 
3388 /* Build notes from verifiers into a string for printing.  */
3389 
3390 static void
3391 print_verifier_notes (aarch64_operand_error *detail,
3392 		      struct disassemble_info *info)
3393 {
3394   if (no_notes)
3395     return;
3396 
3397   /* The output of the verifier cannot be a fatal error, otherwise the assembly
3398      would not have succeeded.  We can safely ignore these.  */
3399   assert (detail->non_fatal);
3400 
3401   (*info->fprintf_func) (info->stream, "  // note: ");
3402   switch (detail->kind)
3403     {
3404     case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
3405       (*info->fprintf_func) (info->stream,
3406 			     _("this `%s' should have an immediately"
3407 			       " preceding `%s'"),
3408 			     detail->data[0].s, detail->data[1].s);
3409       break;
3410 
3411     case AARCH64_OPDE_EXPECTED_A_AFTER_B:
3412       (*info->fprintf_func) (info->stream,
3413 			     _("expected `%s' after previous `%s'"),
3414 			     detail->data[0].s, detail->data[1].s);
3415       break;
3416 
3417     default:
3418       assert (detail->error);
3419       (*info->fprintf_func) (info->stream, "%s", detail->error);
3420       if (detail->index >= 0)
3421 	(*info->fprintf_func) (info->stream, " at operand %d",
3422 			       detail->index + 1);
3423       break;
3424     }
3425 }
3426 
3427 /* Print the instruction according to *INST.  */
3428 
3429 static void
3430 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3431 		    const aarch64_insn code,
3432 		    struct disassemble_info *info,
3433 		    aarch64_operand_error *mismatch_details)
3434 {
3435   bool has_notes = false;
3436 
3437   print_mnemonic_name (inst, info);
3438   print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3439   print_comment (inst, info);
3440 
3441   /* We've already printed a note; there isn't enough space to print more,
3442      so exit.  Notes usually shouldn't overlap, so we shouldn't get a note
3443      from a register and from the instruction at the same time.  */
3444   if (has_notes)
3445     return;
3446 
3447   /* Always run the constraint verifiers; this is needed because the
3448      constraints maintain global state regardless of whether the instruction
3449      has the flag set or not.  */
3450   enum err_type result = verify_constraints (inst, code, pc, false,
3451 					     mismatch_details, &insn_sequence);
3452   switch (result)
3453     {
3454     case ERR_VFI:
3455       print_verifier_notes (mismatch_details, info);
3456       break;
3457     case ERR_UND:
3458     case ERR_UNP:
3459     case ERR_NYI:
3460     default:
3461       break;
3462     }
3463 }
3464 
3465 /* Entry-point of the instruction disassembler and printer.  */
3466 
3467 static void
3468 print_insn_aarch64_word (bfd_vma pc,
3469 			 uint32_t word,
3470 			 struct disassemble_info *info,
3471 			 aarch64_operand_error *errors)
3472 {
3473   static const char *err_msg[ERR_NR_ENTRIES+1] =
3474     {
3475       [ERR_OK]  = "_",
3476       [ERR_UND] = "undefined",
3477       [ERR_UNP] = "unpredictable",
3478       [ERR_NYI] = "NYI"
3479     };
3480 
3481   enum err_type ret;
3482   aarch64_inst inst;
3483 
3484   info->insn_info_valid = 1;
3485   info->branch_delay_insns = 0;
3486   info->data_size = 0;
3487   info->target = 0;
3488   info->target2 = 0;
3489 
3490   if (info->flags & INSN_HAS_RELOC)
3491     /* If the instruction has a reloc associated with it, then
3492        the offset field in the instruction will actually be the
3493        addend for the reloc.  (If we are using REL type relocs).
3494        In such cases, we can ignore the pc when computing
3495        addresses, since the addend is not currently pc-relative.  */
3496     pc = 0;
3497 
3498   ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3499 
3500   if (((word >> 21) & 0x3ff) == 1)
3501     {
3502       /* RESERVED for ALES.  */
3503       assert (ret != ERR_OK);
3504       ret = ERR_NYI;
3505     }
3506 
3507   switch (ret)
3508     {
3509     case ERR_UND:
3510     case ERR_UNP:
3511     case ERR_NYI:
3512       /* Handle undefined instructions.  */
3513       info->insn_type = dis_noninsn;
3514       (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3515 			     word, err_msg[ret]);
3516       break;
3517     case ERR_OK:
3518       user_friendly_fixup (&inst);
3519       print_aarch64_insn (pc, &inst, word, info, errors);
3520       break;
3521     default:
3522       abort ();
3523     }
3524 }
3525 
3526 /* Disallow mapping symbols ($x, $d etc) from
3527    being displayed in symbol relative addresses.  */
3528 
3529 bool
3530 aarch64_symbol_is_valid (asymbol * sym,
3531 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
3532 {
3533   const char * name;
3534 
3535   if (sym == NULL)
3536     return false;
3537 
3538   name = bfd_asymbol_name (sym);
3539 
3540   return name
3541     && (name[0] != '$'
3542 	|| (name[1] != 'x' && name[1] != 'd')
3543 	|| (name[2] != '\0' && name[2] != '.'));
3544 }
3545 
3546 /* Print data bytes on INFO->STREAM.  */
3547 
3548 static void
3549 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3550 		 uint32_t word,
3551 		 struct disassemble_info *info,
3552 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3553 {
3554   switch (info->bytes_per_chunk)
3555     {
3556     case 1:
3557       info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3558       break;
3559     case 2:
3560       info->fprintf_func (info->stream, ".short\t0x%04x", word);
3561       break;
3562     case 4:
3563       info->fprintf_func (info->stream, ".word\t0x%08x", word);
3564       break;
3565     default:
3566       abort ();
3567     }
3568 }
3569 
3570 /* Try to infer the code or data type from a symbol.
3571    Returns nonzero if *MAP_TYPE was set.  */
3572 
3573 static int
3574 get_sym_code_type (struct disassemble_info *info, int n,
3575 		   enum map_type *map_type)
3576 {
3577   asymbol * as;
3578   elf_symbol_type *es;
3579   unsigned int type;
3580   const char *name;
3581 
3582   if (n >= info->symtab_size)
3583     return false;
3584 
3585   /* If the symbol is in a different section, ignore it.  */
3586   if (info->section != NULL && info->section != info->symtab[n]->section)
3587     return false;
3588 
3589   as = info->symtab[n];
3590   if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
3591     return false;
3592   es = (elf_symbol_type *) as;
3593 
3594   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3595 
3596   /* If the symbol has function type then use that.  */
3597   if (type == STT_FUNC)
3598     {
3599       *map_type = MAP_INSN;
3600       return true;
3601     }
3602 
3603   /* Check for mapping symbols.  */
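       /* The AArch64 ELF ABI defines the mapping symbols "$x" (start of A64
          code) and "$d" (start of data); either may be followed by a period
          and additional characters.  */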
3604   name = bfd_asymbol_name(info->symtab[n]);
3605   if (name[0] == '$'
3606       && (name[1] == 'x' || name[1] == 'd')
3607       && (name[2] == '\0' || name[2] == '.'))
3608     {
3609       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3610       return true;
3611     }
3612 
3613   return false;
3614 }
3615 
3616 /* Set the feature bits in arch_variant in order to get the correct disassembly
3617    for the chosen architecture variant.
3618 
3619    Currently we only restrict disassembly for Armv8-R and otherwise enable all
3620    non-R-profile features.  */
3621 static void
3622 select_aarch64_variant (unsigned mach)
3623 {
3624   switch (mach)
3625     {
3626     case bfd_mach_aarch64_8R:
3627       arch_variant = AARCH64_ARCH_V8_R;
3628       break;
3629     default:
3630       arch_variant = AARCH64_ANY & ~(AARCH64_FEATURE_V8_R);
3631     }
3632 }
3633 
3634 /* Entry-point of the AArch64 disassembler.  */
3635 
3636 int
3637 print_insn_aarch64 (bfd_vma pc,
3638 		    struct disassemble_info *info)
3639 {
3640   bfd_byte	buffer[INSNLEN];
3641   int		status;
3642   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3643 			    aarch64_operand_error *);
3644   bool   found = false;
3645   unsigned int	size = 4;
3646   unsigned long	data;
3647   aarch64_operand_error errors;
3648   static bool set_features;
3649 
3650   if (info->disassembler_options)
3651     {
3652       set_default_aarch64_dis_options (info);
3653 
3654       parse_aarch64_dis_options (info->disassembler_options);
3655 
3656       /* To avoid repeated parsing of these options, we remove them here.  */
3657       info->disassembler_options = NULL;
3658     }
3659 
3660   if (!set_features)
3661     {
3662       select_aarch64_variant (info->mach);
3663       set_features = true;
3664     }
3665 
3666   /* AArch64 instructions are always little-endian.  */
3667   info->endian_code = BFD_ENDIAN_LITTLE;
3668 
3669   /* Default to DATA.  A text section is required by the ABI to contain an
3670      INSN mapping symbol at the start.  A data section has no such
3671      requirement, hence if no mapping symbol is found the section must
3672      contain only data.  This however isn't very useful if the user has
3673      fully stripped the binaries.  If this is the case use the section
3674      attributes to determine the default.  If we have no section default to
3675      INSN as well, as we may be disassembling some raw bytes on a baremetal
3676      HEX file or similar.  */
3677   enum map_type type = MAP_DATA;
3678   if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3679     type = MAP_INSN;
3680 
3681   /* First check the full symtab for a mapping symbol, even if there
3682      are no usable non-mapping symbols for this address.  */
3683   if (info->symtab_size != 0
3684       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3685     {
3686       int last_sym = -1;
3687       bfd_vma addr, section_vma = 0;
3688       bool can_use_search_opt_p;
3689       int n;
3690 
3691       if (pc <= last_mapping_addr)
3692 	last_mapping_sym = -1;
3693 
3694       /* Start scanning at the start of the function, or wherever
3695 	 we finished last time.  */
3696       n = info->symtab_pos + 1;
3697 
3698       /* If the last stop offset is different from the current one it means we
3699 	 are disassembling a different blob of bytes.  As such the optimization
3700 	 would not be safe and we should start over.  */
3701       can_use_search_opt_p = last_mapping_sym >= 0
3702 			     && info->stop_offset == last_stop_offset;
3703 
3704       if (n >= last_mapping_sym && can_use_search_opt_p)
3705 	n = last_mapping_sym;
3706 
3707       /* Look down while we haven't passed the location being disassembled.
3708 	 The reason for this is that there's no defined order between a symbol
3709 	 and a mapping symbol that may be at the same address.  We may have to
3710 	 look at least one position ahead.  */
3711       for (; n < info->symtab_size; n++)
3712 	{
3713 	  addr = bfd_asymbol_value (info->symtab[n]);
3714 	  if (addr > pc)
3715 	    break;
3716 	  if (get_sym_code_type (info, n, &type))
3717 	    {
3718 	      last_sym = n;
3719 	      found = true;
3720 	    }
3721 	}
3722 
3723       if (!found)
3724 	{
3725 	  n = info->symtab_pos;
3726 	  if (n >= last_mapping_sym && can_use_search_opt_p)
3727 	    n = last_mapping_sym;
3728 
3729 	  /* No mapping symbol found at this address.  Look backwards
3730 	     for a preceding one, but don't go past the section start;
3731 	     otherwise a data section with no mapping symbol can pick up
3732 	     a text mapping symbol of a preceding section.  The documentation
3733 	     says section can be NULL, in which case we will search all the
3734 	     way to the top.  */
3735 	  if (info->section)
3736 	    section_vma = info->section->vma;
3737 
3738 	  for (; n >= 0; n--)
3739 	    {
3740 	      addr = bfd_asymbol_value (info->symtab[n]);
3741 	      if (addr < section_vma)
3742 		break;
3743 
3744 	      if (get_sym_code_type (info, n, &type))
3745 		{
3746 		  last_sym = n;
3747 		  found = true;
3748 		  break;
3749 		}
3750 	    }
3751 	}
3752 
3753       last_mapping_sym = last_sym;
3754       last_type = type;
3755       last_stop_offset = info->stop_offset;
3756 
3757       /* Look a little bit ahead to see if we should print out
3758 	 less than four bytes of data.  If there's a symbol,
3759 	 mapping or otherwise, after two bytes then don't
3760 	 print more.  */
3761       if (last_type == MAP_DATA)
3762 	{
3763 	  size = 4 - (pc & 3);
3764 	  for (n = last_sym + 1; n < info->symtab_size; n++)
3765 	    {
3766 	      addr = bfd_asymbol_value (info->symtab[n]);
3767 	      if (addr > pc)
3768 		{
3769 		  if (addr - pc < size)
3770 		    size = addr - pc;
3771 		  break;
3772 		}
3773 	    }
3774 	  /* If the next symbol is after three bytes, we need to
3775 	     print only part of the data, so that we can use either
3776 	     .byte or .short.  */
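     	  /* E.g. at an even pc with the next symbol three bytes away we
     	     print a .short now and leave the final byte for the next call.  */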
3777 	  if (size == 3)
3778 	    size = (pc & 1) ? 1 : 2;
3779 	}
3780     }
3781   else
3782     last_type = type;
3783 
3784   /* PR 10263: Disassemble data if requested to do so by the user.  */
3785   if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3786     {
3787       /* size was set above.  */
3788       info->bytes_per_chunk = size;
3789       info->display_endian = info->endian;
3790       printer = print_insn_data;
3791     }
3792   else
3793     {
3794       info->bytes_per_chunk = size = INSNLEN;
3795       info->display_endian = info->endian_code;
3796       printer = print_insn_aarch64_word;
3797     }
3798 
3799   status = (*info->read_memory_func) (pc, buffer, size, info);
3800   if (status != 0)
3801     {
3802       (*info->memory_error_func) (status, pc, info);
3803       return -1;
3804     }
3805 
3806   data = bfd_get_bits (buffer, size * 8,
3807 		       info->display_endian == BFD_ENDIAN_BIG);
3808 
3809   (*printer) (pc, data, info, &errors);
3810 
3811   return size;
3812 }
3813 
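     /* List the AArch64-specific disassembler options on STREAM; these are the
        values accepted via -M, e.g. "objdump -d -M no-aliases,notes".  */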
3814 void
3815 print_aarch64_disassembler_options (FILE *stream)
3816 {
3817   fprintf (stream, _("\n\
3818 The following AARCH64 specific disassembler options are supported for use\n\
3819 with the -M switch (multiple options should be separated by commas):\n"));
3820 
3821   fprintf (stream, _("\n\
3822   no-aliases         Don't print instruction aliases.\n"));
3823 
3824   fprintf (stream, _("\n\
3825   aliases            Do print instruction aliases.\n"));
3826 
3827   fprintf (stream, _("\n\
3828   no-notes           Don't print instruction notes.\n"));
3829 
3830   fprintf (stream, _("\n\
3831   notes              Do print instruction notes.\n"));
3832 
3833 #ifdef DEBUG_AARCH64
3834   fprintf (stream, _("\n\
3835   debug_dump         Temp switch for debug trace.\n"));
3836 #endif /* DEBUG_AARCH64 */
3837 
3838   fprintf (stream, _("\n"));
3839 }
3840