xref: /netbsd-src/external/gpl3/gdb/dist/opcodes/aarch64-dis.c (revision 4adfa041e14dc071f6d5e464e5b88c9721db4b02)
1 /* aarch64-dis.c -- AArch64 disassembler.
2    Copyright (C) 2009-2024 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
30 
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
33 
34 #define INSNLEN 4
35 
36 /* This character is used to encode style information within the output
37    buffers.  See get_style_text and print_operands for more details.  */
38 #define STYLE_MARKER_CHAR '\002'
39 
40 /* Cached mapping symbol state.  */
41 enum map_type
42 {
43   MAP_INSN,
44   MAP_DATA
45 };
46 
47 static aarch64_feature_set arch_variant; /* See select_aarch64_variant.  */
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_stop_offset = 0;
51 static bfd_vma last_mapping_addr = 0;
52 
53 /* Other options */
54 static int no_aliases = 0;	/* If set disassemble as most general inst.  */
55 static int no_notes = 1;	/* If set do not print disassemble notes in the
56 				  output as comments.  */
57 
58 /* Currently active instruction sequence.  */
59 static aarch64_instr_sequence insn_sequence;
60 
61 static void
62 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
63 {
64 }
65 
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
68 {
69   /* Try to match options that are simple flags */
70   if (startswith (option, "no-aliases"))
71     {
72       no_aliases = 1;
73       return;
74     }
75 
76   if (startswith (option, "aliases"))
77     {
78       no_aliases = 0;
79       return;
80     }
81 
82   if (startswith (option, "no-notes"))
83     {
84       no_notes = 1;
85       return;
86     }
87 
88   if (startswith (option, "notes"))
89     {
90       no_notes = 0;
91       return;
92     }
93 
94 #ifdef DEBUG_AARCH64
95   if (startswith (option, "debug_dump"))
96     {
97       debug_dump = 1;
98       return;
99     }
100 #endif /* DEBUG_AARCH64 */
101 
102   /* Invalid option.  */
103   opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
104 }
105 
106 static void
107 parse_aarch64_dis_options (const char *options)
108 {
109   const char *option_end;
110 
111   if (options == NULL)
112     return;
113 
114   while (*options != '\0')
115     {
116       /* Skip empty options.  */
117       if (*options == ',')
118 	{
119 	  options++;
120 	  continue;
121 	}
122 
123       /* We know that *options is neither NUL or a comma.  */
124       option_end = options + 1;
125       while (*option_end != ',' && *option_end != '\0')
126 	option_end++;
127 
128       parse_aarch64_dis_option (options, option_end - options);
129 
130       /* Go on to the next one.  If option_end points to a comma, it
131 	 will be skipped above.  */
132       options = option_end;
133     }
134 }
135 
136 /* Functions doing the instruction disassembling.  */
137 
138 /* The unnamed arguments consist of the number of fields and information about
139    these fields where the VALUE will be extracted from CODE and returned.
140    MASK can be zero or the base mask of the opcode.
141 
142    N.B. the fields are required to be in such an order than the most signficant
143    field for VALUE comes the first, e.g. the <index> in
144     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145    is encoded in H:L:M in some cases, the fields H:L:M should be passed in
146    the order of H, L, M.  */
147 
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
150 {
151   uint32_t num;
152   const aarch64_field *field;
153   enum aarch64_field_kind kind;
154   va_list va;
155 
156   va_start (va, mask);
157   num = va_arg (va, uint32_t);
158   assert (num <= 5);
159   aarch64_insn value = 0x0;
160   while (num--)
161     {
162       kind = va_arg (va, enum aarch64_field_kind);
163       field = &fields[kind];
164       value <<= field->width;
165       value |= extract_field (kind, code, mask);
166     }
167   va_end (va);
168   return value;
169 }
170 
171 /* Extract the value of all fields in SELF->fields after START from
172    instruction CODE.  The least significant bit comes from the final field.  */
173 
174 static aarch64_insn
175 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
176 			  aarch64_insn code)
177 {
178   aarch64_insn value;
179   unsigned int i;
180   enum aarch64_field_kind kind;
181 
182   value = 0;
183   for (i = start;
184        i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
185     {
186       kind = self->fields[i];
187       value <<= fields[kind].width;
188       value |= extract_field (kind, code, 0);
189     }
190   return value;
191 }
192 
193 /* Extract the value of all fields in SELF->fields from instruction CODE.
194    The least significant bit comes from the final field.  */
195 
196 static aarch64_insn
197 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
198 {
199   return extract_all_fields_after (self, 0, code);
200 }
201 
202 /* Sign-extend bit I of VALUE.  */
203 static inline uint64_t
204 sign_extend (aarch64_insn value, unsigned i)
205 {
206   uint64_t ret, sign;
207 
208   assert (i < 32);
209   ret = value;
210   sign = (uint64_t) 1 << i;
211   return ((ret & (sign + sign - 1)) ^ sign) - sign;
212 }
213 
214 /* N.B. the following inline helpfer functions create a dependency on the
215    order of operand qualifier enumerators.  */
216 
217 /* Given VALUE, return qualifier for a general purpose register.  */
218 static inline enum aarch64_opnd_qualifier
219 get_greg_qualifier_from_value (aarch64_insn value)
220 {
221   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
222   if (value <= 0x1
223       && aarch64_get_qualifier_standard_value (qualifier) == value)
224     return qualifier;
225   return AARCH64_OPND_QLF_ERR;
226 }
227 
228 /* Given VALUE, return qualifier for a vector register.  This does not support
229    decoding instructions that accept the 2H vector type.  */
230 
231 static inline enum aarch64_opnd_qualifier
232 get_vreg_qualifier_from_value (aarch64_insn value)
233 {
234   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
235 
236   /* Instructions using vector type 2H should not call this function.  Skip over
237      the 2H qualifier.  */
238   if (qualifier >= AARCH64_OPND_QLF_V_2H)
239     qualifier += 1;
240 
241   if (value <= 0x8
242       && aarch64_get_qualifier_standard_value (qualifier) == value)
243     return qualifier;
244   return AARCH64_OPND_QLF_ERR;
245 }
246 
247 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
248 static inline enum aarch64_opnd_qualifier
249 get_sreg_qualifier_from_value (aarch64_insn value)
250 {
251   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
252 
253   if (value <= 0x4
254       && aarch64_get_qualifier_standard_value (qualifier) == value)
255     return qualifier;
256   return AARCH64_OPND_QLF_ERR;
257 }
258 
259 /* Given the instruction in *INST which is probably half way through the
260    decoding and our caller wants to know the expected qualifier for operand
261    I.  Return such a qualifier if we can establish it; otherwise return
262    AARCH64_OPND_QLF_NIL.  */
263 
264 static aarch64_opnd_qualifier_t
265 get_expected_qualifier (const aarch64_inst *inst, int i)
266 {
267   aarch64_opnd_qualifier_seq_t qualifiers;
268   /* Should not be called if the qualifier is known.  */
269   if (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL)
270     {
271       int invalid_count;
272       if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
273 				   i, qualifiers, &invalid_count))
274 	return qualifiers[i];
275       else
276 	return AARCH64_OPND_QLF_NIL;
277     }
278   else
279     return AARCH64_OPND_QLF_ERR;
280 }
281 
282 /* Operand extractors.  */
283 
284 bool
285 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
286 		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
287 		  const aarch64_insn code ATTRIBUTE_UNUSED,
288 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
289 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
290 {
291   return true;
292 }
293 
294 bool
295 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
296 		   const aarch64_insn code,
297 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
298 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
299 {
300   info->reg.regno = (extract_field (self->fields[0], code, 0)
301 		     + get_operand_specific_data (self));
302   return true;
303 }
304 
305 bool
306 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
307 		   const aarch64_insn code ATTRIBUTE_UNUSED,
308 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
309 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
310 {
311   assert (info->idx == 1
312 	  || info->idx == 2
313 	  || info->idx == 3
314 	  || info->idx == 5);
315 
316   unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
317   info->reg.regno = (prev_regno == 0x1f) ? 0x1f
318 					 : prev_regno + 1;
319   return true;
320 }
321 
322 /* e.g. IC <ic_op>{, <Xt>}.  */
323 bool
324 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
325 			  const aarch64_insn code,
326 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
327 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
328 {
329   info->reg.regno = extract_field (self->fields[0], code, 0);
330   assert (info->idx == 1
331 	  && (aarch64_get_operand_class (inst->operands[0].type)
332 	      == AARCH64_OPND_CLASS_SYSTEM));
333   /* This will make the constraint checking happy and more importantly will
334      help the disassembler determine whether this operand is optional or
335      not.  */
336   info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
337 
338   return true;
339 }
340 
341 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
342 bool
343 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
344 		     const aarch64_insn code,
345 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
346 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
347 {
348   /* regno */
349   info->reglane.regno = extract_field (self->fields[0], code,
350 				       inst->opcode->mask);
351 
352   /* Index and/or type.  */
353   if (inst->opcode->iclass == asisdone
354     || inst->opcode->iclass == asimdins)
355     {
356       if (info->type == AARCH64_OPND_En
357 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
358 	{
359 	  unsigned shift;
360 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
361 	  assert (info->idx == 1);	/* Vn */
362 	  aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
363 	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
364 	  info->qualifier = get_expected_qualifier (inst, info->idx);
365 	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
366 	    return 0;
367 	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
368 	  info->reglane.index = value >> shift;
369 	}
370       else
371 	{
372 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
373 	     imm5<3:0>	<V>
374 	     0000	RESERVED
375 	     xxx1	B
376 	     xx10	H
377 	     x100	S
378 	     1000	D  */
379 	  int pos = -1;
380 	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
381 	  while (++pos <= 3 && (value & 0x1) == 0)
382 	    value >>= 1;
383 	  if (pos > 3)
384 	    return false;
385 	  info->qualifier = get_sreg_qualifier_from_value (pos);
386 	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
387 	    return 0;
388 	  info->reglane.index = (unsigned) (value >> 1);
389 	}
390     }
391   else if (inst->opcode->iclass == dotproduct)
392     {
393       /* Need information in other operand(s) to help decoding.  */
394       info->qualifier = get_expected_qualifier (inst, info->idx);
395       if (info->qualifier == AARCH64_OPND_QLF_ERR)
396 	return 0;
397       switch (info->qualifier)
398 	{
399 	case AARCH64_OPND_QLF_S_4B:
400 	case AARCH64_OPND_QLF_S_2H:
401 	  /* L:H */
402 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
403 	  info->reglane.regno &= 0x1f;
404 	  break;
405 	default:
406 	  return false;
407 	}
408     }
409   else if (inst->opcode->iclass == cryptosm3)
410     {
411       /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
412       info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
413     }
414   else
415     {
416       /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
417          or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
418 
419       /* Need information in other operand(s) to help decoding.  */
420       info->qualifier = get_expected_qualifier (inst, info->idx);
421       if (info->qualifier == AARCH64_OPND_QLF_ERR)
422 	return 0;
423       switch (info->qualifier)
424 	{
425 	case AARCH64_OPND_QLF_S_H:
426 	  if (info->type == AARCH64_OPND_Em16)
427 	    {
428 	      /* h:l:m */
429 	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
430 						    FLD_M);
431 	      info->reglane.regno &= 0xf;
432 	    }
433 	  else
434 	    {
435 	      /* h:l */
436 	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
437 	    }
438 	  break;
439 	case AARCH64_OPND_QLF_S_S:
440 	  /* h:l */
441 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
442 	  break;
443 	case AARCH64_OPND_QLF_S_D:
444 	  /* H */
445 	  info->reglane.index = extract_field (FLD_H, code, 0);
446 	  break;
447 	default:
448 	  return false;
449 	}
450 
451       if (inst->opcode->op == OP_FCMLA_ELEM
452 	  && info->qualifier != AARCH64_OPND_QLF_S_H)
453 	{
454 	  /* Complex operand takes two elements.  */
455 	  if (info->reglane.index & 1)
456 	    return false;
457 	  info->reglane.index /= 2;
458 	}
459     }
460 
461   return true;
462 }
463 
464 bool
465 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
466 		     const aarch64_insn code,
467 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
468 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
469 {
470   /* R */
471   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
472   /* len */
473   info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
474   info->reglist.stride = 1;
475   return true;
476 }
477 
478 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
479 bool
480 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
481 			  aarch64_opnd_info *info, const aarch64_insn code,
482 			  const aarch64_inst *inst,
483 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
484 {
485   aarch64_insn value;
486   /* Number of elements in each structure to be loaded/stored.  */
487   unsigned expected_num = get_opcode_dependent_value (inst->opcode);
488 
489   struct
490     {
491       unsigned is_reserved;
492       unsigned num_regs;
493       unsigned num_elements;
494     } data [] =
495   {   {0, 4, 4},
496       {1, 4, 4},
497       {0, 4, 1},
498       {0, 4, 2},
499       {0, 3, 3},
500       {1, 3, 3},
501       {0, 3, 1},
502       {0, 1, 1},
503       {0, 2, 2},
504       {1, 2, 2},
505       {0, 2, 1},
506   };
507 
508   /* Rt */
509   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
510   /* opcode */
511   value = extract_field (FLD_opcode, code, 0);
512   /* PR 21595: Check for a bogus value.  */
513   if (value >= ARRAY_SIZE (data))
514     return false;
515   if (expected_num != data[value].num_elements || data[value].is_reserved)
516     return false;
517   info->reglist.num_regs = data[value].num_regs;
518   info->reglist.stride = 1;
519 
520   return true;
521 }
522 
523 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
524    lanes instructions.  */
525 bool
526 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
527 			    aarch64_opnd_info *info, const aarch64_insn code,
528 			    const aarch64_inst *inst,
529 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
530 {
531   aarch64_insn value;
532 
533   /* Rt */
534   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
535   /* S */
536   value = extract_field (FLD_S, code, 0);
537 
538   /* Number of registers is equal to the number of elements in
539      each structure to be loaded/stored.  */
540   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
541   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
542 
543   /* Except when it is LD1R.  */
544   if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
545     info->reglist.num_regs = 2;
546 
547   info->reglist.stride = 1;
548   return true;
549 }
550 
551 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
552    load/store single element instructions.  */
553 bool
554 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
555 			   aarch64_opnd_info *info, const aarch64_insn code,
556 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
557 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
558 {
559   aarch64_field field = {0, 0};
560   aarch64_insn QSsize;		/* fields Q:S:size.  */
561   aarch64_insn opcodeh2;	/* opcode<2:1> */
562 
563   /* Rt */
564   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
565 
566   /* Decode the index, opcode<2:1> and size.  */
567   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
568   opcodeh2 = extract_field_2 (&field, code, 0);
569   QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
570   switch (opcodeh2)
571     {
572     case 0x0:
573       info->qualifier = AARCH64_OPND_QLF_S_B;
574       /* Index encoded in "Q:S:size".  */
575       info->reglist.index = QSsize;
576       break;
577     case 0x1:
578       if (QSsize & 0x1)
579 	/* UND.  */
580 	return false;
581       info->qualifier = AARCH64_OPND_QLF_S_H;
582       /* Index encoded in "Q:S:size<1>".  */
583       info->reglist.index = QSsize >> 1;
584       break;
585     case 0x2:
586       if ((QSsize >> 1) & 0x1)
587 	/* UND.  */
588 	return false;
589       if ((QSsize & 0x1) == 0)
590 	{
591 	  info->qualifier = AARCH64_OPND_QLF_S_S;
592 	  /* Index encoded in "Q:S".  */
593 	  info->reglist.index = QSsize >> 2;
594 	}
595       else
596 	{
597 	  if (extract_field (FLD_S, code, 0))
598 	    /* UND */
599 	    return false;
600 	  info->qualifier = AARCH64_OPND_QLF_S_D;
601 	  /* Index encoded in "Q".  */
602 	  info->reglist.index = QSsize >> 3;
603 	}
604       break;
605     default:
606       return false;
607     }
608 
609   info->reglist.has_index = 1;
610   info->reglist.num_regs = 0;
611   info->reglist.stride = 1;
612   /* Number of registers is equal to the number of elements in
613      each structure to be loaded/stored.  */
614   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
615   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
616 
617   return true;
618 }
619 
620 /* Decode fields immh:immb and/or Q for e.g.
621    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
622    or SSHR <V><d>, <V><n>, #<shift>.  */
623 
624 bool
625 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
626 			       aarch64_opnd_info *info, const aarch64_insn code,
627 			       const aarch64_inst *inst,
628 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
629 {
630   int pos;
631   aarch64_insn Q, imm, immh;
632   enum aarch64_insn_class iclass = inst->opcode->iclass;
633 
634   immh = extract_field (FLD_immh, code, 0);
635   if (immh == 0)
636     return false;
637   imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
638   pos = 4;
639   /* Get highest set bit in immh.  */
640   while (--pos >= 0 && (immh & 0x8) == 0)
641     immh <<= 1;
642 
643   assert ((iclass == asimdshf || iclass == asisdshf)
644 	  && (info->type == AARCH64_OPND_IMM_VLSR
645 	      || info->type == AARCH64_OPND_IMM_VLSL));
646 
647   if (iclass == asimdshf)
648     {
649       Q = extract_field (FLD_Q, code, 0);
650       /* immh	Q	<T>
651 	 0000	x	SEE AdvSIMD modified immediate
652 	 0001	0	8B
653 	 0001	1	16B
654 	 001x	0	4H
655 	 001x	1	8H
656 	 01xx	0	2S
657 	 01xx	1	4S
658 	 1xxx	0	RESERVED
659 	 1xxx	1	2D  */
660       info->qualifier =
661 	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
662       if (info->qualifier == AARCH64_OPND_QLF_ERR)
663 	return false;
664     }
665   else
666     {
667       info->qualifier = get_sreg_qualifier_from_value (pos);
668       if (info->qualifier == AARCH64_OPND_QLF_ERR)
669 	return 0;
670     }
671 
672   if (info->type == AARCH64_OPND_IMM_VLSR)
673     /* immh	<shift>
674        0000	SEE AdvSIMD modified immediate
675        0001	(16-UInt(immh:immb))
676        001x	(32-UInt(immh:immb))
677        01xx	(64-UInt(immh:immb))
678        1xxx	(128-UInt(immh:immb))  */
679     info->imm.value = (16 << pos) - imm;
680   else
681     /* immh:immb
682        immh	<shift>
683        0000	SEE AdvSIMD modified immediate
684        0001	(UInt(immh:immb)-8)
685        001x	(UInt(immh:immb)-16)
686        01xx	(UInt(immh:immb)-32)
687        1xxx	(UInt(immh:immb)-64)  */
688     info->imm.value = imm - (8 << pos);
689 
690   return true;
691 }
692 
693 /* Decode shift immediate for e.g. sshr (imm).  */
694 bool
695 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
696 		      aarch64_opnd_info *info, const aarch64_insn code,
697 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
698 		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
699 {
700   int64_t imm;
701   aarch64_insn val;
702   val = extract_field (FLD_size, code, 0);
703   switch (val)
704     {
705     case 0: imm = 8; break;
706     case 1: imm = 16; break;
707     case 2: imm = 32; break;
708     default: return false;
709     }
710   info->imm.value = imm;
711   return true;
712 }
713 
714 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
715    value in the field(s) will be extracted as unsigned immediate value.  */
716 bool
717 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
718 		 const aarch64_insn code,
719 		 const aarch64_inst *inst,
720 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
721 {
722   uint64_t imm;
723 
724   imm = extract_all_fields (self, code);
725 
726   if (operand_need_sign_extension (self))
727     imm = sign_extend (imm, get_operand_fields_width (self) - 1);
728 
729   if (operand_need_shift_by_two (self))
730     imm <<= 2;
731   else if (operand_need_shift_by_three (self))
732     imm <<= 3;
733   else if (operand_need_shift_by_four (self))
734     imm <<= 4;
735 
736   if (info->type == AARCH64_OPND_ADDR_ADRP)
737     imm <<= 12;
738 
739   if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
740       && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
741     imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
742 
743   info->imm.value = imm;
744   return true;
745 }
746 
747 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
748 bool
749 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
750 		      const aarch64_insn code,
751 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
752 		      aarch64_operand_error *errors)
753 {
754   aarch64_ext_imm (self, info, code, inst, errors);
755   info->shifter.kind = AARCH64_MOD_LSL;
756   info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
757   return true;
758 }
759 
760 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
761      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
762 bool
763 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
764 				  aarch64_opnd_info *info,
765 				  const aarch64_insn code,
766 				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
767 				  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
768 {
769   uint64_t imm;
770   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
771   aarch64_field field = {0, 0};
772 
773   assert (info->idx == 1);
774 
775   if (info->type == AARCH64_OPND_SIMD_FPIMM)
776     info->imm.is_fp = 1;
777 
778   /* a:b:c:d:e:f:g:h */
779   imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
780   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
781     {
782       /* Either MOVI <Dd>, #<imm>
783 	 or     MOVI <Vd>.2D, #<imm>.
784 	 <imm> is a 64-bit immediate
785 	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
786 	 encoded in "a:b:c:d:e:f:g:h".	*/
787       int i;
788       unsigned abcdefgh = imm;
789       for (imm = 0ull, i = 0; i < 8; i++)
790 	if (((abcdefgh >> i) & 0x1) != 0)
791 	  imm |= 0xffull << (8 * i);
792     }
793   info->imm.value = imm;
794 
795   /* cmode */
796   info->qualifier = get_expected_qualifier (inst, info->idx);
797   if (info->qualifier == AARCH64_OPND_QLF_ERR)
798     return 0;
799   switch (info->qualifier)
800     {
801     case AARCH64_OPND_QLF_NIL:
802       /* no shift */
803       info->shifter.kind = AARCH64_MOD_NONE;
804       return 1;
805     case AARCH64_OPND_QLF_LSL:
806       /* shift zeros */
807       info->shifter.kind = AARCH64_MOD_LSL;
808       switch (aarch64_get_qualifier_esize (opnd0_qualifier))
809 	{
810 	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
811 	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
812 	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
813 	default: return false;
814 	}
815       /* 00: 0; 01: 8; 10:16; 11:24.  */
816       info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
817       break;
818     case AARCH64_OPND_QLF_MSL:
819       /* shift ones */
820       info->shifter.kind = AARCH64_MOD_MSL;
821       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
822       info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
823       break;
824     default:
825       return false;
826     }
827 
828   return true;
829 }
830 
831 /* Decode an 8-bit floating-point immediate.  */
832 bool
833 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
834 		   const aarch64_insn code,
835 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
836 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
837 {
838   info->imm.value = extract_all_fields (self, code);
839   info->imm.is_fp = 1;
840   return true;
841 }
842 
843 /* Decode a 1-bit rotate immediate (#90 or #270).  */
844 bool
845 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
846 			 const aarch64_insn code,
847 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
848 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
849 {
850   uint64_t rot = extract_field (self->fields[0], code, 0);
851   assert (rot < 2U);
852   info->imm.value = rot * 180 + 90;
853   return true;
854 }
855 
856 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270).  */
857 bool
858 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
859 			 const aarch64_insn code,
860 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
861 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
862 {
863   uint64_t rot = extract_field (self->fields[0], code, 0);
864   assert (rot < 4U);
865   info->imm.value = rot * 90;
866   return true;
867 }
868 
869 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
870 bool
871 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
872 		   aarch64_opnd_info *info, const aarch64_insn code,
873 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
874 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
875 {
876   info->imm.value = 64- extract_field (FLD_scale, code, 0);
877   return true;
878 }
879 
880 /* Decode arithmetic immediate for e.g.
881      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
882 bool
883 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
884 		  aarch64_opnd_info *info, const aarch64_insn code,
885 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
886 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
887 {
888   aarch64_insn value;
889 
890   info->shifter.kind = AARCH64_MOD_LSL;
891   /* shift */
892   value = extract_field (FLD_shift, code, 0);
893   if (value >= 2)
894     return false;
895   info->shifter.amount = value ? 12 : 0;
896   /* imm12 (unsigned) */
897   info->imm.value = extract_field (FLD_imm12, code, 0);
898 
899   return true;
900 }
901 
902 /* Return true if VALUE is a valid logical immediate encoding, storing the
903    decoded value in *RESULT if so.  ESIZE is the number of bytes in the
904    decoded immediate.  */
905 static bool
906 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
907 {
908   uint64_t imm, mask;
909   uint32_t N, R, S;
910   unsigned simd_size;
911 
912   /* value is N:immr:imms.  */
913   S = value & 0x3f;
914   R = (value >> 6) & 0x3f;
915   N = (value >> 12) & 0x1;
916 
917   /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
918      (in other words, right rotated by R), then replicated.  */
919   if (N != 0)
920     {
921       simd_size = 64;
922       mask = 0xffffffffffffffffull;
923     }
924   else
925     {
926       switch (S)
927 	{
928 	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
929 	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
930 	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
931 	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
932 	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
933 	default: return false;
934 	}
935       mask = (1ull << simd_size) - 1;
936       /* Top bits are IGNORED.  */
937       R &= simd_size - 1;
938     }
939 
940   if (simd_size > esize * 8)
941     return false;
942 
943   /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
944   if (S == simd_size - 1)
945     return false;
946   /* S+1 consecutive bits to 1.  */
947   /* NOTE: S can't be 63 due to detection above.  */
948   imm = (1ull << (S + 1)) - 1;
949   /* Rotate to the left by simd_size - R.  */
950   if (R != 0)
951     imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
952   /* Replicate the value according to SIMD size.  */
953   switch (simd_size)
954     {
955     case  2: imm = (imm <<  2) | imm;
956       /* Fall through.  */
957     case  4: imm = (imm <<  4) | imm;
958       /* Fall through.  */
959     case  8: imm = (imm <<  8) | imm;
960       /* Fall through.  */
961     case 16: imm = (imm << 16) | imm;
962       /* Fall through.  */
963     case 32: imm = (imm << 32) | imm;
964       /* Fall through.  */
965     case 64: break;
966     default: return 0;
967     }
968 
969   *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
970 
971   return true;
972 }
973 
974 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
975 bool
976 aarch64_ext_limm (const aarch64_operand *self,
977 		  aarch64_opnd_info *info, const aarch64_insn code,
978 		  const aarch64_inst *inst,
979 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
980 {
981   uint32_t esize;
982   aarch64_insn value;
983 
984   value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
985 			  self->fields[2]);
986   esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
987   return decode_limm (esize, value, &info->imm.value);
988 }
989 
990 /* Decode a logical immediate for the BIC alias of AND (etc.).  */
991 bool
992 aarch64_ext_inv_limm (const aarch64_operand *self,
993 		      aarch64_opnd_info *info, const aarch64_insn code,
994 		      const aarch64_inst *inst,
995 		      aarch64_operand_error *errors)
996 {
997   if (!aarch64_ext_limm (self, info, code, inst, errors))
998     return false;
999   info->imm.value = ~info->imm.value;
1000   return true;
1001 }
1002 
1003 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
1004    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
1005 bool
1006 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
1007 		aarch64_opnd_info *info,
1008 		const aarch64_insn code, const aarch64_inst *inst,
1009 		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1010 {
1011   aarch64_insn value;
1012 
1013   /* Rt */
1014   info->reg.regno = extract_field (FLD_Rt, code, 0);
1015 
1016   /* size */
1017   value = extract_field (FLD_ldst_size, code, 0);
1018   if (inst->opcode->iclass == ldstpair_indexed
1019       || inst->opcode->iclass == ldstnapair_offs
1020       || inst->opcode->iclass == ldstpair_off
1021       || inst->opcode->iclass == loadlit)
1022     {
1023       enum aarch64_opnd_qualifier qualifier;
1024       switch (value)
1025 	{
1026 	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1027 	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1028 	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
1029 	default: return false;
1030 	}
1031       info->qualifier = qualifier;
1032     }
1033   else
1034     {
1035       /* opc1:size */
1036       value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
1037       if (value > 0x4)
1038 	return false;
1039       info->qualifier = get_sreg_qualifier_from_value (value);
1040       if (info->qualifier == AARCH64_OPND_QLF_ERR)
1041 	return false;
1042     }
1043 
1044   return true;
1045 }
1046 
1047 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
1048 bool
1049 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1050 			 aarch64_opnd_info *info,
1051 			 aarch64_insn code,
1052 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1053 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1054 {
1055   /* Rn */
1056   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1057   return true;
1058 }
1059 
1060 /* Decode the address operand for rcpc3 instructions with optional load/store
1061    datasize offset, e.g. STILPP <Xs>, <Xt>, [<Xn|SP>{,#-16}]! and
1062    LIDAP <Xs>, <Xt>, [<Xn|SP>]{,#-16}.  */
1063 bool
1064 aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1065 				   aarch64_opnd_info *info,
1066 				   aarch64_insn code,
1067 				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
1068 				   aarch64_operand_error *err ATTRIBUTE_UNUSED)
1069 {
1070   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1071   if (!extract_field (FLD_opc2, code, 0))
1072     {
1073       info->addr.writeback = 1;
1074 
1075       enum aarch64_opnd type;
1076       for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1077 	{
1078 	  aarch64_opnd_info opnd = info[i];
1079 	  type = opnd.type;
1080 	  if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
1081 	    break;
1082 	}
1083 
1084       assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
1085       int offset = calc_ldst_datasize (inst->operands);
1086 
1087       switch (type)
1088 	{
1089 	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
1090 	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
1091 	  info->addr.offset.imm = -offset;
1092 	  info->addr.preind = 1;
1093 	  break;
1094 	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
1095 	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
1096 	  info->addr.offset.imm = offset;
1097 	  info->addr.postind = 1;
1098 	  break;
1099 	default:
1100 	  return false;
1101 	}
1102     }
1103   return true;
1104 }
1105 
1106 bool
1107 aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1108 			       aarch64_opnd_info *info,
1109 			       aarch64_insn code,
1110 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1111 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1112 {
1113   info->qualifier = get_expected_qualifier (inst, info->idx);
1114   if (info->qualifier == AARCH64_OPND_QLF_ERR)
1115     return 0;
1116 
1117   /* Rn */
1118   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1119 
1120   /* simm9 */
1121   aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1122   info->addr.offset.imm = sign_extend (imm, 8);
1123   return true;
1124 }
1125 
1126 /* Decode the address operand for e.g.
1127      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
1128 bool
1129 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1130 			 aarch64_opnd_info *info,
1131 			 aarch64_insn code, const aarch64_inst *inst,
1132 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1133 {
1134   info->qualifier = get_expected_qualifier (inst, info->idx);
1135   if (info->qualifier == AARCH64_OPND_QLF_ERR)
1136     return 0;
1137 
1138   /* Rn */
1139   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1140 
1141   /* simm9 */
1142   aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1143   info->addr.offset.imm = sign_extend (imm, 8);
1144   if (extract_field (self->fields[2], code, 0) == 1) {
1145     info->addr.writeback = 1;
1146     info->addr.preind = 1;
1147   }
1148   return true;
1149 }
1150 
1151 /* Decode the address operand for e.g.
1152      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1153 bool
1154 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1155 			 aarch64_opnd_info *info,
1156 			 aarch64_insn code, const aarch64_inst *inst,
1157 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1158 {
1159   aarch64_insn S, value;
1160 
1161   /* Rn */
1162   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1163   /* Rm */
1164   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1165   /* option */
1166   value = extract_field (FLD_option, code, 0);
1167   info->shifter.kind =
1168     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1169   /* Fix-up the shifter kind; although the table-driven approach is
1170      efficient, it is slightly inflexible, thus needing this fix-up.  */
1171   if (info->shifter.kind == AARCH64_MOD_UXTX)
1172     info->shifter.kind = AARCH64_MOD_LSL;
1173   /* S */
1174   S = extract_field (FLD_S, code, 0);
1175   if (S == 0)
1176     {
1177       info->shifter.amount = 0;
1178       info->shifter.amount_present = 0;
1179     }
1180   else
1181     {
1182       int size;
1183       /* Need information in other operand(s) to help achieve the decoding
1184 	 from 'S' field.  */
1185       info->qualifier = get_expected_qualifier (inst, info->idx);
1186       if (info->qualifier == AARCH64_OPND_QLF_ERR)
1187 	return 0;
1188       /* Get the size of the data element that is accessed, which may be
1189 	 different from that of the source register size, e.g. in strb/ldrb.  */
1190       size = aarch64_get_qualifier_esize (info->qualifier);
1191       info->shifter.amount = get_logsz (size);
1192       info->shifter.amount_present = 1;
1193     }
1194 
1195   return true;
1196 }
1197 
1198 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
1199 bool
1200 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1201 		       aarch64_insn code, const aarch64_inst *inst,
1202 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1203 {
1204   aarch64_insn imm;
1205   info->qualifier = get_expected_qualifier (inst, info->idx);
1206   if (info->qualifier == AARCH64_OPND_QLF_ERR)
1207     return 0;
1208 
1209   /* Rn */
1210   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1211   /* simm (imm9 or imm7)  */
1212   imm = extract_field (self->fields[0], code, 0);
1213   info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1214   if (self->fields[0] == FLD_imm7
1215       || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1216     /* scaled immediate in ld/st pair instructions.  */
1217     info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1218   /* qualifier */
1219   if (inst->opcode->iclass == ldst_unscaled
1220       || inst->opcode->iclass == ldstnapair_offs
1221       || inst->opcode->iclass == ldstpair_off
1222       || inst->opcode->iclass == ldst_unpriv)
1223     info->addr.writeback = 0;
1224   else
1225     {
1226       /* pre/post- index */
1227       info->addr.writeback = 1;
1228       if (extract_field (self->fields[1], code, 0) == 1)
1229 	info->addr.preind = 1;
1230       else
1231 	info->addr.postind = 1;
1232     }
1233 
1234   return true;
1235 }
1236 
1237 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
1238 bool
1239 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1240 			 aarch64_insn code,
1241 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1242 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1243 {
1244   int shift;
1245   info->qualifier = get_expected_qualifier (inst, info->idx);
1246   if (info->qualifier == AARCH64_OPND_QLF_ERR)
1247     return 0;
1248   shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1249   /* Rn */
1250   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1251   /* uimm12 */
1252   info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1253   return true;
1254 }
1255 
1256 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
1257 bool
1258 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1259 			 aarch64_insn code,
1260 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1261 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1262 {
1263   aarch64_insn imm;
1264 
1265   info->qualifier = get_expected_qualifier (inst, info->idx);
1266   if (info->qualifier == AARCH64_OPND_QLF_ERR)
1267     return 0;
1268   /* Rn */
1269   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1270   /* simm10 */
1271   imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1272   info->addr.offset.imm = sign_extend (imm, 9) << 3;
1273   if (extract_field (self->fields[3], code, 0) == 1) {
1274     info->addr.writeback = 1;
1275     info->addr.preind = 1;
1276   }
1277   return true;
1278 }
1279 
1280 /* Decode the address operand for e.g.
1281      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
1282 bool
1283 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1284 			    aarch64_opnd_info *info,
1285 			    aarch64_insn code, const aarch64_inst *inst,
1286 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1287 {
1288   /* The opcode dependent area stores the number of elements in
1289      each structure to be loaded/stored.  */
1290   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1291 
1292   /* Rn */
1293   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1294   /* Rm | #<amount>  */
1295   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1296   if (info->addr.offset.regno == 31)
1297     {
1298       if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1299 	/* Special handling of loading single structure to all lane.  */
1300 	info->addr.offset.imm = (is_ld1r ? 1
1301 				 : inst->operands[0].reglist.num_regs)
1302 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1303       else
1304 	info->addr.offset.imm = inst->operands[0].reglist.num_regs
1305 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1306 	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1307     }
1308   else
1309     info->addr.offset.is_reg = 1;
1310   info->addr.writeback = 1;
1311 
1312   return true;
1313 }
1314 
1315 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
1316 bool
1317 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1318 		  aarch64_opnd_info *info,
1319 		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1320 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1321 {
1322   aarch64_insn value;
1323   /* cond */
1324   value = extract_field (FLD_cond, code, 0);
1325   info->cond = get_cond_from_value (value);
1326   return true;
1327 }
1328 
1329 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
1330 bool
1331 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1332 		    aarch64_opnd_info *info,
1333 		    aarch64_insn code,
1334 		    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1335 		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1336 {
1337   /* op0:op1:CRn:CRm:op2 */
1338   info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1339 				       FLD_CRm, FLD_op2);
1340   info->sysreg.flags = 0;
1341 
1342   /* If a system instruction, check which restrictions should be on the register
1343      value during decoding, these will be enforced then.  */
1344   if (inst->opcode->iclass == ic_system)
1345     {
1346       /* Check to see if it's read-only, else check if it's write only.
1347 	 if it's both or unspecified don't care.  */
1348       if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1349 	info->sysreg.flags = F_REG_READ;
1350       else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1351 	       == F_SYS_WRITE)
1352 	info->sysreg.flags = F_REG_WRITE;
1353     }
1354 
1355   return true;
1356 }
1357 
1358 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
1359 bool
1360 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1361 			 aarch64_opnd_info *info, aarch64_insn code,
1362 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1363 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1364 {
1365   int i;
1366   aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
1367   /* op1:op2 */
1368   info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1369   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1370     if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1371       {
1372         /* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
1373         uint32_t flags = aarch64_pstatefields[i].flags;
1374         if ((flags & F_REG_IN_CRM)
1375             && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
1376           continue;
1377         info->sysreg.flags = flags;
1378         return true;
1379       }
1380   /* Reserved value in <pstatefield>.  */
1381   return false;
1382 }
1383 
1384 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
1385 bool
1386 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1387 		       aarch64_opnd_info *info,
1388 		       aarch64_insn code,
1389 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1390 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1391 {
1392   int i;
1393   aarch64_insn value;
1394   const aarch64_sys_ins_reg *sysins_ops;
1395   /* op0:op1:CRn:CRm:op2 */
1396   value = extract_fields (code, 0, 5,
1397 			  FLD_op0, FLD_op1, FLD_CRn,
1398 			  FLD_CRm, FLD_op2);
1399 
1400   switch (info->type)
1401     {
1402     case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1403     case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1404     case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1405     case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1406     case AARCH64_OPND_SYSREG_TLBIP: sysins_ops = aarch64_sys_regs_tlbi; break;
1407     case AARCH64_OPND_SYSREG_SR:
1408 	sysins_ops = aarch64_sys_regs_sr;
1409 	 /* Let's remove op2 for rctx.  Refer to comments in the definition of
1410 	    aarch64_sys_regs_sr[].  */
1411 	value = value & ~(0x7);
1412 	break;
1413     default: return false;
1414     }
1415 
1416   for (i = 0; sysins_ops[i].name != NULL; ++i)
1417     if (sysins_ops[i].value == value)
1418       {
1419 	info->sysins_op = sysins_ops + i;
1420 	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1421 		     info->sysins_op->name,
1422 		     (unsigned)info->sysins_op->value,
1423 		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1424 	return true;
1425       }
1426 
1427   return false;
1428 }
1429 
1430 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
1431 
1432 bool
1433 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1434 		     aarch64_opnd_info *info,
1435 		     aarch64_insn code,
1436 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1437 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1438 {
1439   /* CRm */
1440   info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1441   return true;
1442 }
1443 
1444 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>.  */
1445 
1446 bool
1447 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1448 		     aarch64_opnd_info *info,
1449 		     aarch64_insn code,
1450 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1451 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1452 {
1453   /* For the DSB nXS barrier variant immediate is encoded in 2-bit field.  */
1454   aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1455   info->barrier = aarch64_barrier_dsb_nxs_options + field;
1456   return true;
1457 }
1458 
1459 /* Decode the prefetch operation option operand for e.g.
1460      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
1461 
1462 bool
1463 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1464 		   aarch64_opnd_info *info,
1465 		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1466 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1467 {
1468   /* prfop in Rt */
1469   info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1470   return true;
1471 }
1472 
1473 /* Decode the hint number for an alias taking an operand.  Set info->hint_option
1474    to the matching name/value pair in aarch64_hint_options.  */
1475 
1476 bool
1477 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1478 		  aarch64_opnd_info *info,
1479 		  aarch64_insn code,
1480 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1481 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1482 {
1483   /* CRm:op2.  */
1484   unsigned hint_number;
1485   int i;
1486 
1487   hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1488 
1489   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1490     {
1491       if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1492 	{
1493 	  info->hint_option = &(aarch64_hint_options[i]);
1494 	  return true;
1495 	}
1496     }
1497 
1498   return false;
1499 }
1500 
1501 /* Decode the extended register operand for e.g.
1502      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1503 bool
1504 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1505 			  aarch64_opnd_info *info,
1506 			  aarch64_insn code,
1507 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1508 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1509 {
1510   aarch64_insn value;
1511 
1512   /* Rm */
1513   info->reg.regno = extract_field (FLD_Rm, code, 0);
1514   /* option */
1515   value = extract_field (FLD_option, code, 0);
1516   info->shifter.kind =
1517     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1518   /* imm3 */
1519   info->shifter.amount = extract_field (FLD_imm3_10, code,  0);
1520 
1521   /* This makes the constraint checking happy.  */
1522   info->shifter.operator_present = 1;
1523 
1524   /* Assume inst->operands[0].qualifier has been resolved.  */
1525   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1526   info->qualifier = AARCH64_OPND_QLF_W;
1527   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1528       && (info->shifter.kind == AARCH64_MOD_UXTX
1529 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1530     info->qualifier = AARCH64_OPND_QLF_X;
1531 
1532   return true;
1533 }
1534 
1535 /* Decode the shifted register operand for e.g.
1536      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1537 bool
1538 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1539 			 aarch64_opnd_info *info,
1540 			 aarch64_insn code,
1541 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1542 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1543 {
1544   aarch64_insn value;
1545 
1546   /* Rm */
1547   info->reg.regno = extract_field (FLD_Rm, code, 0);
1548   /* shift */
1549   value = extract_field (FLD_shift, code, 0);
1550   info->shifter.kind =
1551     aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1552   if (info->shifter.kind == AARCH64_MOD_ROR
1553       && inst->opcode->iclass != log_shift)
1554     /* ROR is not available for the shifted register operand in arithmetic
1555        instructions.  */
1556     return false;
1557   /* imm6 */
1558   info->shifter.amount = extract_field (FLD_imm6_10, code,  0);
1559 
1560   /* This makes the constraint checking happy.  */
1561   info->shifter.operator_present = 1;
1562 
1563   return true;
1564 }
1565 
1566 /* Decode the LSL-shifted register operand for e.g.
1567      ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}.  */
1568 bool
1569 aarch64_ext_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1570 			     aarch64_opnd_info *info,
1571 			     aarch64_insn code,
1572 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1573 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1574 {
1575   /* Rm */
1576   info->reg.regno = extract_field (FLD_Rm, code, 0);
1577   /* imm3 */
1578   info->shifter.kind = AARCH64_MOD_LSL;
1579   info->shifter.amount = extract_field (FLD_imm3_10, code,  0);
1580   return true;
1581 }
1582 
1583 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1584    where <offset> is given by the OFFSET parameter and where <factor> is
1585    1 plus SELF's operand-dependent value.  fields[0] specifies the field
1586    that holds <base>.  */
1587 static bool
1588 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1589 				 aarch64_opnd_info *info, aarch64_insn code,
1590 				 int64_t offset)
1591 {
1592   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1593   info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1594   info->addr.offset.is_reg = false;
1595   info->addr.writeback = false;
1596   info->addr.preind = true;
1597   if (offset != 0)
1598     info->shifter.kind = AARCH64_MOD_MUL_VL;
1599   info->shifter.amount = 1;
1600   info->shifter.operator_present = (info->addr.offset.imm != 0);
1601   info->shifter.amount_present = false;
1602   return true;
1603 }
1604 
1605 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1606    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1607    SELF's operand-dependent value.  fields[0] specifies the field that
1608    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
1609 bool
1610 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1611 			       aarch64_opnd_info *info, aarch64_insn code,
1612 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1613 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1614 {
1615   int offset;
1616 
1617   offset = extract_field (FLD_SVE_imm4, code, 0);
1618   offset = ((offset + 8) & 15) - 8;
1619   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1620 }
1621 
1622 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1623    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1624    SELF's operand-dependent value.  fields[0] specifies the field that
1625    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
1626 bool
1627 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1628 			       aarch64_opnd_info *info, aarch64_insn code,
1629 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1630 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1631 {
1632   int offset;
1633 
1634   offset = extract_field (FLD_SVE_imm6, code, 0);
1635   offset = (((offset + 32) & 63) - 32);
1636   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1637 }
1638 
1639 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1640    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1641    SELF's operand-dependent value.  fields[0] specifies the field that
1642    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
1643    and imm3 fields, with imm3 being the less-significant part.  */
1644 bool
1645 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1646 			       aarch64_opnd_info *info,
1647 			       aarch64_insn code,
1648 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1649 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1650 {
1651   int offset;
1652 
1653   offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1654   offset = (((offset + 256) & 511) - 256);
1655   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1656 }
1657 
1658 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1659    is given by the OFFSET parameter and where <shift> is SELF's operand-
1660    dependent value.  fields[0] specifies the base register field <base>.  */
1661 static bool
1662 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1663 			      aarch64_opnd_info *info, aarch64_insn code,
1664 			      int64_t offset)
1665 {
1666   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1667   info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1668   info->addr.offset.is_reg = false;
1669   info->addr.writeback = false;
1670   info->addr.preind = true;
1671   info->shifter.operator_present = false;
1672   info->shifter.amount_present = false;
1673   return true;
1674 }
1675 
1676 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1677    is a 4-bit signed number and where <shift> is SELF's operand-dependent
1678    value.  fields[0] specifies the base register field.  */
1679 bool
1680 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1681 			    aarch64_opnd_info *info, aarch64_insn code,
1682 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1683 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1684 {
1685   int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1686   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1687 }
1688 
1689 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1690    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1691    value.  fields[0] specifies the base register field.  */
1692 bool
1693 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1694 			    aarch64_opnd_info *info, aarch64_insn code,
1695 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1696 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1697 {
1698   int offset = extract_field (FLD_SVE_imm6, code, 0);
1699   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1700 }
1701 
1702 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1703    is SELF's operand-dependent value.  fields[0] specifies the base
1704    register field and fields[1] specifies the offset register field.  */
1705 bool
1706 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1707 			     aarch64_opnd_info *info, aarch64_insn code,
1708 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1709 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1710 {
1711   int index_regno;
1712 
1713   index_regno = extract_field (self->fields[1], code, 0);
1714   if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1715     return false;
1716 
1717   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1718   info->addr.offset.regno = index_regno;
1719   info->addr.offset.is_reg = true;
1720   info->addr.writeback = false;
1721   info->addr.preind = true;
1722   info->shifter.kind = AARCH64_MOD_LSL;
1723   info->shifter.amount = get_operand_specific_data (self);
1724   info->shifter.operator_present = (info->shifter.amount != 0);
1725   info->shifter.amount_present = (info->shifter.amount != 0);
1726   return true;
1727 }
1728 
1729 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1730    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1731    base register field, fields[1] specifies the offset register field and
1732    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1733 bool
1734 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1735 			     aarch64_opnd_info *info, aarch64_insn code,
1736 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1737 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1738 {
1739   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1740   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1741   info->addr.offset.is_reg = true;
1742   info->addr.writeback = false;
1743   info->addr.preind = true;
1744   if (extract_field (self->fields[2], code, 0))
1745     info->shifter.kind = AARCH64_MOD_SXTW;
1746   else
1747     info->shifter.kind = AARCH64_MOD_UXTW;
1748   info->shifter.amount = get_operand_specific_data (self);
1749   info->shifter.operator_present = true;
1750   info->shifter.amount_present = (info->shifter.amount != 0);
1751   return true;
1752 }
1753 
1754 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1755    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1756    fields[0] specifies the base register field.  */
1757 bool
1758 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1759 			    aarch64_opnd_info *info, aarch64_insn code,
1760 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1761 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1762 {
1763   int offset = extract_field (FLD_imm5, code, 0);
1764   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1765 }
1766 
1767 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1768    where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1769    number.  fields[0] specifies the base register field and fields[1]
1770    specifies the offset register field.  */
1771 static bool
1772 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1773 			 aarch64_insn code, enum aarch64_modifier_kind kind)
1774 {
1775   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1776   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1777   info->addr.offset.is_reg = true;
1778   info->addr.writeback = false;
1779   info->addr.preind = true;
1780   info->shifter.kind = kind;
1781   info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1782   info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1783 				    || info->shifter.amount != 0);
1784   info->shifter.amount_present = (info->shifter.amount != 0);
1785   return true;
1786 }
1787 
1788 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1789    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1790    field and fields[1] specifies the offset register field.  */
1791 bool
1792 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1793 			     aarch64_opnd_info *info, aarch64_insn code,
1794 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1795 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1796 {
1797   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1798 }
1799 
1800 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1801    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1802    field and fields[1] specifies the offset register field.  */
1803 bool
1804 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1805 			      aarch64_opnd_info *info, aarch64_insn code,
1806 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1807 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1808 {
1809   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1810 }
1811 
1812 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1813    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1814    field and fields[1] specifies the offset register field.  */
1815 bool
1816 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1817 			      aarch64_opnd_info *info, aarch64_insn code,
1818 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1819 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1820 {
1821   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1822 }
1823 
1824 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1825    has the raw field value and that the low 8 bits decode to VALUE.  */
1826 static bool
1827 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1828 {
1829   info->shifter.kind = AARCH64_MOD_LSL;
1830   info->shifter.amount = 0;
1831   if (info->imm.value & 0x100)
1832     {
1833       if (value == 0)
1834 	/* Decode 0x100 as #0, LSL #8.  */
1835 	info->shifter.amount = 8;
1836       else
1837 	value *= 256;
1838     }
1839   info->shifter.operator_present = (info->shifter.amount != 0);
1840   info->shifter.amount_present = (info->shifter.amount != 0);
1841   info->imm.value = value;
1842   return true;
1843 }
1844 
1845 /* Decode an SVE ADD/SUB immediate.  */
1846 bool
1847 aarch64_ext_sve_aimm (const aarch64_operand *self,
1848 		      aarch64_opnd_info *info, const aarch64_insn code,
1849 		      const aarch64_inst *inst,
1850 		      aarch64_operand_error *errors)
1851 {
1852   return (aarch64_ext_imm (self, info, code, inst, errors)
1853 	  && decode_sve_aimm (info, (uint8_t) info->imm.value));
1854 }
1855 
1856 bool
1857 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1858 				 aarch64_opnd_info *info, aarch64_insn code,
1859 				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1860 				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1861 {
1862   unsigned int num_regs = get_operand_specific_data (self);
1863   unsigned int val = extract_field (self->fields[0], code, 0);
1864   info->reglist.first_regno = val * num_regs;
1865   info->reglist.num_regs = num_regs;
1866   info->reglist.stride = 1;
1867   return true;
1868 }
1869 
1870 /* Decode an SVE CPY/DUP immediate.  */
1871 bool
1872 aarch64_ext_sve_asimm (const aarch64_operand *self,
1873 		       aarch64_opnd_info *info, const aarch64_insn code,
1874 		       const aarch64_inst *inst,
1875 		       aarch64_operand_error *errors)
1876 {
1877   return (aarch64_ext_imm (self, info, code, inst, errors)
1878 	  && decode_sve_aimm (info, (int8_t) info->imm.value));
1879 }
1880 
1881 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1882    The fields array specifies which field to use.  */
1883 bool
1884 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1885 				aarch64_opnd_info *info, aarch64_insn code,
1886 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1887 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1888 {
1889   if (extract_field (self->fields[0], code, 0))
1890     info->imm.value = 0x3f800000;
1891   else
1892     info->imm.value = 0x3f000000;
1893   info->imm.is_fp = true;
1894   return true;
1895 }
1896 
1897 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1898    The fields array specifies which field to use.  */
1899 bool
1900 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1901 				aarch64_opnd_info *info, aarch64_insn code,
1902 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1903 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1904 {
1905   if (extract_field (self->fields[0], code, 0))
1906     info->imm.value = 0x40000000;
1907   else
1908     info->imm.value = 0x3f000000;
1909   info->imm.is_fp = true;
1910   return true;
1911 }
1912 
1913 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1914    The fields array specifies which field to use.  */
1915 bool
1916 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1917 				aarch64_opnd_info *info, aarch64_insn code,
1918 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1919 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1920 {
1921   if (extract_field (self->fields[0], code, 0))
1922     info->imm.value = 0x3f800000;
1923   else
1924     info->imm.value = 0x0;
1925   info->imm.is_fp = true;
1926   return true;
1927 }
1928 
1929 /* Decode ZA tile vector, vector indicator, vector selector, qualifier and
1930    immediate on numerous SME instruction fields such as MOVA.  */
1931 bool
1932 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
1933                              aarch64_opnd_info *info, aarch64_insn code,
1934                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
1935                              aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1936 {
1937   int fld_size = extract_field (self->fields[0], code, 0);
1938   int fld_q = extract_field (self->fields[1], code, 0);
1939   int fld_v = extract_field (self->fields[2], code, 0);
1940   int fld_rv = extract_field (self->fields[3], code, 0);
1941   int fld_zan_imm = extract_field (self->fields[4], code, 0);
1942 
1943   /* Deduce qualifier encoded in size and Q fields.  */
1944   if (fld_size == 0)
1945     {
1946       info->indexed_za.regno = 0;
1947       info->indexed_za.index.imm = fld_zan_imm;
1948     }
1949   else if (fld_size == 1)
1950     {
1951       info->indexed_za.regno = fld_zan_imm >> 3;
1952       info->indexed_za.index.imm = fld_zan_imm & 0x07;
1953     }
1954   else if (fld_size == 2)
1955     {
1956       info->indexed_za.regno = fld_zan_imm >> 2;
1957       info->indexed_za.index.imm = fld_zan_imm & 0x03;
1958     }
1959   else if (fld_size == 3 && fld_q == 0)
1960     {
1961       info->indexed_za.regno = fld_zan_imm >> 1;
1962       info->indexed_za.index.imm = fld_zan_imm & 0x01;
1963     }
1964   else if (fld_size == 3 && fld_q == 1)
1965     {
1966       info->indexed_za.regno = fld_zan_imm;
1967       info->indexed_za.index.imm = 0;
1968     }
1969   else
1970     return false;
1971 
1972   info->indexed_za.index.regno = fld_rv + 12;
1973   info->indexed_za.v = fld_v;
1974 
1975   return true;
1976 }
1977 
1978 bool
1979 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
1980 				   aarch64_opnd_info *info, aarch64_insn code,
1981 				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
1982 				   aarch64_operand_error *errors
1983 				     ATTRIBUTE_UNUSED)
1984 {
1985   int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1986   int range_size = get_opcode_dependent_value (inst->opcode);
1987   int fld_v = extract_field (self->fields[0], code, 0);
1988   int fld_rv = extract_field (self->fields[1], code, 0);
1989   int fld_zan_imm = extract_field (self->fields[2], code, 0);
1990   int max_value = 16 / range_size / ebytes;
1991 
1992   if (max_value == 0)
1993     max_value = 1;
1994 
1995   int regno = fld_zan_imm / max_value;
1996   if (regno >= ebytes)
1997     return false;
1998 
1999   info->indexed_za.regno = regno;
2000   info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
2001   info->indexed_za.index.countm1 = range_size - 1;
2002   info->indexed_za.index.regno = fld_rv + 12;
2003   info->indexed_za.v = fld_v;
2004 
2005   return true;
2006 }
2007 
2008 /* Decode in SME instruction ZERO list of up to eight 64-bit element tile names
2009    separated by commas, encoded in the "imm8" field.
2010 
2011    For programmer convenience an assembler must also accept the names of
2012    32-bit, 16-bit and 8-bit element tiles which are converted into the
2013    corresponding set of 64-bit element tiles.
2014 */
2015 bool
2016 aarch64_ext_sme_za_list (const aarch64_operand *self,
2017                          aarch64_opnd_info *info, aarch64_insn code,
2018                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
2019                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2020 {
2021   int mask = extract_field (self->fields[0], code, 0);
2022   info->imm.value = mask;
2023   return true;
2024 }
2025 
2026 /* Decode ZA array vector select register (Rv field), optional vector and
2027    memory offset (imm4_11 field).
2028 */
2029 bool
2030 aarch64_ext_sme_za_array (const aarch64_operand *self,
2031                           aarch64_opnd_info *info, aarch64_insn code,
2032                           const aarch64_inst *inst,
2033                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2034 {
2035   int regno = extract_field (self->fields[0], code, 0);
2036   if (info->type == AARCH64_OPND_SME_ZA_array_off4)
2037     regno += 12;
2038   else
2039     regno += 8;
2040   int imm = extract_field (self->fields[1], code, 0);
2041   int num_offsets = get_operand_specific_data (self);
2042   if (num_offsets == 0)
2043     num_offsets = 1;
2044   info->indexed_za.index.regno = regno;
2045   info->indexed_za.index.imm = imm * num_offsets;
2046   info->indexed_za.index.countm1 = num_offsets - 1;
2047   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2048   return true;
2049 }
2050 
2051 /* Decode two ZA tile slice (V, Rv, off3| ZAn ,off2 | ZAn, ol| ZAn) feilds.  */
2052 bool
2053 aarch64_ext_sme_za_vrs1 (const aarch64_operand *self,
2054 			  aarch64_opnd_info *info, aarch64_insn code,
2055 			  const aarch64_inst *inst,
2056 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2057 {
2058   int v = extract_field (self->fields[0], code, 0);
2059   int regno = 12 + extract_field (self->fields[1], code, 0);
2060   int imm, za_reg, num_offset = 2;
2061 
2062   switch (info->qualifier)
2063     {
2064     case AARCH64_OPND_QLF_S_B:
2065       imm = extract_field (self->fields[2], code, 0);
2066       info->indexed_za.index.imm = imm * num_offset;
2067       break;
2068     case AARCH64_OPND_QLF_S_H:
2069     case AARCH64_OPND_QLF_S_S:
2070       za_reg = extract_field (self->fields[2], code, 0);
2071       imm = extract_field (self->fields[3], code, 0);
2072       info->indexed_za.index.imm = imm * num_offset;
2073       info->indexed_za.regno = za_reg;
2074       break;
2075     case AARCH64_OPND_QLF_S_D:
2076       za_reg = extract_field (self->fields[2], code, 0);
2077       info->indexed_za.regno = za_reg;
2078       break;
2079     default:
2080       return false;
2081     }
2082 
2083   info->indexed_za.index.regno = regno;
2084   info->indexed_za.index.countm1 = num_offset - 1;
2085   info->indexed_za.v = v;
2086   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2087   return true;
2088 }
2089 
2090 /* Decode four ZA tile slice (V, Rv, off3| ZAn ,off2 | ZAn, ol| ZAn) feilds.  */
2091 bool
2092 aarch64_ext_sme_za_vrs2 (const aarch64_operand *self,
2093 			  aarch64_opnd_info *info, aarch64_insn code,
2094 			  const aarch64_inst *inst,
2095 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2096 {
2097   int v = extract_field (self->fields[0], code, 0);
2098   int regno = 12 + extract_field (self->fields[1], code, 0);
2099   int imm, za_reg, num_offset =4;
2100 
2101   switch (info->qualifier)
2102     {
2103     case AARCH64_OPND_QLF_S_B:
2104       imm = extract_field (self->fields[2], code, 0);
2105       info->indexed_za.index.imm = imm * num_offset;
2106       break;
2107     case AARCH64_OPND_QLF_S_H:
2108       za_reg = extract_field (self->fields[2], code, 0);
2109       imm = extract_field (self->fields[3], code, 0);
2110       info->indexed_za.index.imm = imm * num_offset;
2111       info->indexed_za.regno = za_reg;
2112       break;
2113     case AARCH64_OPND_QLF_S_S:
2114     case AARCH64_OPND_QLF_S_D:
2115       za_reg = extract_field (self->fields[2], code, 0);
2116       info->indexed_za.regno = za_reg;
2117       break;
2118     default:
2119       return false;
2120     }
2121 
2122   info->indexed_za.index.regno = regno;
2123   info->indexed_za.index.countm1 = num_offset - 1;
2124   info->indexed_za.v = v;
2125   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2126   return true;
2127 }
2128 
2129 bool
2130 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
2131                                aarch64_opnd_info *info, aarch64_insn code,
2132                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
2133                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2134 {
2135   int regno = extract_field (self->fields[0], code, 0);
2136   int imm = extract_field (self->fields[1], code, 0);
2137   info->addr.base_regno = regno;
2138   info->addr.offset.imm = imm;
2139   /* MUL VL operator is always present for this operand.  */
2140   info->shifter.kind = AARCH64_MOD_MUL_VL;
2141   info->shifter.operator_present = (imm != 0);
2142   return true;
2143 }
2144 
2145 /* Decode {SM|ZA} filed for SMSTART and SMSTOP instructions.  */
2146 bool
2147 aarch64_ext_sme_sm_za (const aarch64_operand *self,
2148                        aarch64_opnd_info *info, aarch64_insn code,
2149                        const aarch64_inst *inst ATTRIBUTE_UNUSED,
2150                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2151 {
2152   info->pstatefield = 0x1b;
2153   aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
2154   fld_crm >>= 1;    /* CRm[3:1].  */
2155 
2156   if (fld_crm == 0x1)
2157     info->reg.regno = 's';
2158   else if (fld_crm == 0x2)
2159     info->reg.regno = 'z';
2160   else
2161     return false;
2162 
2163   return true;
2164 }
2165 
2166 bool
2167 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
2168 				     aarch64_opnd_info *info, aarch64_insn code,
2169 				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
2170 				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2171 {
2172   aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
2173   aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
2174   aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
2175   aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
2176   aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
2177   int imm;
2178 
2179   info->indexed_za.regno = fld_pn;
2180   info->indexed_za.index.regno = fld_rm + 12;
2181 
2182   if (fld_tszl & 0x1)
2183     imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
2184   else if (fld_tszl & 0x2)
2185     imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
2186   else if (fld_tszl & 0x4)
2187     imm = (fld_i1 << 1) | fld_tszh;
2188   else if (fld_tszh)
2189     imm = fld_i1;
2190   else
2191     return false;
2192 
2193   info->indexed_za.index.imm = imm;
2194   return true;
2195 }
2196 
2197 /* Decode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
2198    array specifies which field to use for Zn.  MM is encoded in the
2199    concatenation of imm5 and SVE_tszh, with imm5 being the less
2200    significant part.  */
2201 bool
2202 aarch64_ext_sve_index (const aarch64_operand *self,
2203 		       aarch64_opnd_info *info, aarch64_insn code,
2204 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
2205 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2206 {
2207   int val;
2208 
2209   info->reglane.regno = extract_field (self->fields[0], code, 0);
2210   val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
2211   if ((val & 31) == 0)
2212     return 0;
2213   while ((val & 1) == 0)
2214     val /= 2;
2215   info->reglane.index = val / 2;
2216   return true;
2217 }
2218 
2219 /* Decode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
2220    than the number of elements in 128 bit, which can encode il:tsz.  */
2221 bool
2222 aarch64_ext_sve_index_imm (const aarch64_operand *self,
2223 			   aarch64_opnd_info *info, aarch64_insn code,
2224 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
2225 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2226 {
2227   int val;
2228 
2229   info->reglane.regno = extract_field (self->fields[0], code, 0);
2230   val = extract_fields (code, 0, 2, self->fields[2], self->fields[1]);
2231   if ((val & 15) == 0)
2232     return 0;
2233   while ((val & 1) == 0)
2234     val /= 2;
2235   info->reglane.index = val / 2;
2236   return true;
2237 }
2238 
2239 /* Decode a logical immediate for the MOV alias of SVE DUPM.  */
2240 bool
2241 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
2242 			  aarch64_opnd_info *info, const aarch64_insn code,
2243 			  const aarch64_inst *inst,
2244 			  aarch64_operand_error *errors)
2245 {
2246   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
2247   return (aarch64_ext_limm (self, info, code, inst, errors)
2248 	  && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
2249 }
2250 
2251 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
2252    and where MM occupies the most-significant part.  The operand-dependent
2253    value specifies the number of bits in Zn.  */
2254 bool
2255 aarch64_ext_sve_quad_index (const aarch64_operand *self,
2256 			    aarch64_opnd_info *info, aarch64_insn code,
2257 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
2258 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2259 {
2260   unsigned int reg_bits = get_operand_specific_data (self);
2261   unsigned int val = extract_all_fields (self, code);
2262   info->reglane.regno = val & ((1 << reg_bits) - 1);
2263   info->reglane.index = val >> reg_bits;
2264   return true;
2265 }
2266 
2267 /* Decode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
2268    to use for Zn.  The opcode-dependent value specifies the number
2269    of registers in the list.  */
2270 bool
2271 aarch64_ext_sve_reglist (const aarch64_operand *self,
2272 			 aarch64_opnd_info *info, aarch64_insn code,
2273 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2274 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2275 {
2276   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2277   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2278   info->reglist.stride = 1;
2279   return true;
2280 }
2281 
2282 /* Decode {Zn.<T> , Zm.<T>}.  The fields array specifies which field
2283    to use for Zn.  The opcode-dependent value specifies the number
2284    of registers in the list.  */
2285 bool
2286 aarch64_ext_sve_reglist_zt (const aarch64_operand *self,
2287 			    aarch64_opnd_info *info, aarch64_insn code,
2288 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
2289 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2290 {
2291   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2292   info->reglist.num_regs = get_operand_specific_data (self);
2293   info->reglist.stride = 1;
2294   return true;
2295 }
2296 
2297 /* Decode a strided register list.  The first field holds the top bit
2298    (0 or 16) and the second field holds the lower bits.  The stride is
2299    16 divided by the list length.  */
2300 bool
2301 aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
2302 				 aarch64_opnd_info *info, aarch64_insn code,
2303 				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2304 				 aarch64_operand_error *errors
2305 				   ATTRIBUTE_UNUSED)
2306 {
2307   unsigned int upper = extract_field (self->fields[0], code, 0);
2308   unsigned int lower = extract_field (self->fields[1], code, 0);
2309   info->reglist.first_regno = upper * 16 + lower;
2310   info->reglist.num_regs = get_operand_specific_data (self);
2311   info->reglist.stride = 16 / info->reglist.num_regs;
2312   return true;
2313 }
2314 
2315 /* Decode <pattern>{, MUL #<amount>}.  The fields array specifies which
2316    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
2317    field.  */
2318 bool
2319 aarch64_ext_sve_scale (const aarch64_operand *self,
2320 		       aarch64_opnd_info *info, aarch64_insn code,
2321 		       const aarch64_inst *inst, aarch64_operand_error *errors)
2322 {
2323   int val;
2324 
2325   if (!aarch64_ext_imm (self, info, code, inst, errors))
2326     return false;
2327   val = extract_field (FLD_SVE_imm4, code, 0);
2328   info->shifter.kind = AARCH64_MOD_MUL;
2329   info->shifter.amount = val + 1;
2330   info->shifter.operator_present = (val != 0);
2331   info->shifter.amount_present = (val != 0);
2332   return true;
2333 }
2334 
2335 /* Return the top set bit in VALUE, which is expected to be relatively
2336    small.  */
2337 static uint64_t
2338 get_top_bit (uint64_t value)
2339 {
2340   while ((value & -value) != value)
2341     value -= value & -value;
2342   return value;
2343 }
2344 
2345 /* Decode an SVE shift-left immediate.  */
2346 bool
2347 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2348 			aarch64_opnd_info *info, const aarch64_insn code,
2349 			const aarch64_inst *inst, aarch64_operand_error *errors)
2350 {
2351   if (!aarch64_ext_imm (self, info, code, inst, errors)
2352       || info->imm.value == 0)
2353     return false;
2354 
2355   info->imm.value -= get_top_bit (info->imm.value);
2356   return true;
2357 }
2358 
2359 /* Decode an SVE shift-right immediate.  */
2360 bool
2361 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2362 			aarch64_opnd_info *info, const aarch64_insn code,
2363 			const aarch64_inst *inst, aarch64_operand_error *errors)
2364 {
2365   if (!aarch64_ext_imm (self, info, code, inst, errors)
2366       || info->imm.value == 0)
2367     return false;
2368 
2369   info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2370   return true;
2371 }
2372 
2373 /* Decode X0-X30.  Register 31 is unallocated.  */
2374 bool
2375 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2376 		       const aarch64_insn code,
2377 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
2378 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2379 {
2380   info->reg.regno = extract_field (self->fields[0], code, 0);
2381   return info->reg.regno <= 30;
2382 }
2383 
2384 /* Decode an indexed register, with the first field being the register
2385    number and the remaining fields being the index.  */
2386 bool
2387 aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
2388 			  const aarch64_insn code,
2389 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
2390 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2391 {
2392   int bias = get_operand_specific_data (self);
2393   info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
2394   info->reglane.index = extract_all_fields_after (self, 1, code);
2395   return true;
2396 }
2397 
2398 /* Decode a plain shift-right immediate, when there is only a single
2399    element size.  */
2400 bool
2401 aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
2402 			  const aarch64_insn code,
2403 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
2404 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2405 {
2406   unsigned int base = 1 << get_operand_field_width (self, 0);
2407   info->imm.value = base - extract_field (self->fields[0], code, 0);
2408   return true;
2409 }
2410 
2411 /* Bitfields that are commonly used to encode certain operands' information
2412    may be partially used as part of the base opcode in some instructions.
2413    For example, the bit 1 of the field 'size' in
2414      FCVTXN <Vb><d>, <Va><n>
2415    is actually part of the base opcode, while only size<0> is available
2416    for encoding the register type.  Another example is the AdvSIMD
2417    instruction ORR (register), in which the field 'size' is also used for
2418    the base opcode, leaving only the field 'Q' available to encode the
2419    vector register arrangement specifier '8B' or '16B'.
2420 
2421    This function tries to deduce the qualifier from the value of partially
2422    constrained field(s).  Given the VALUE of such a field or fields, the
2423    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2424    operand encoding), the function returns the matching qualifier or
2425    AARCH64_OPND_QLF_NIL if nothing matches.
2426 
2427    N.B. CANDIDATES is a group of possible qualifiers that are valid for
2428    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2429    may end with AARCH64_OPND_QLF_NIL.  */
2430 
2431 static enum aarch64_opnd_qualifier
2432 get_qualifier_from_partial_encoding (aarch64_insn value,
2433 				     const enum aarch64_opnd_qualifier* \
2434 				     candidates,
2435 				     aarch64_insn mask)
2436 {
2437   int i;
2438   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2439   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2440     {
2441       aarch64_insn standard_value;
2442       if (candidates[i] == AARCH64_OPND_QLF_NIL)
2443 	break;
2444       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2445       if ((standard_value & mask) == (value & mask))
2446 	return candidates[i];
2447     }
2448   return AARCH64_OPND_QLF_NIL;
2449 }
2450 
2451 /* Given a list of qualifier sequences, return all possible valid qualifiers
2452    for operand IDX in QUALIFIERS.
2453    Assume QUALIFIERS is an array whose length is large enough.  */
2454 
2455 static void
2456 get_operand_possible_qualifiers (int idx,
2457 				 const aarch64_opnd_qualifier_seq_t *list,
2458 				 enum aarch64_opnd_qualifier *qualifiers)
2459 {
2460   int i;
2461   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2462     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2463       break;
2464 }
2465 
2466 /* Decode the size Q field for e.g. SHADD.
2467    We tag one operand with the qualifer according to the code;
2468    whether the qualifier is valid for this opcode or not, it is the
2469    duty of the semantic checking.  */
2470 
2471 static int
2472 decode_sizeq (aarch64_inst *inst)
2473 {
2474   int idx;
2475   enum aarch64_opnd_qualifier qualifier;
2476   aarch64_insn code;
2477   aarch64_insn value, mask;
2478   enum aarch64_field_kind fld_sz;
2479   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2480 
2481   if (inst->opcode->iclass == asisdlse
2482      || inst->opcode->iclass == asisdlsep
2483      || inst->opcode->iclass == asisdlso
2484      || inst->opcode->iclass == asisdlsop)
2485     fld_sz = FLD_vldst_size;
2486   else
2487     fld_sz = FLD_size;
2488 
2489   code = inst->value;
2490   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2491   /* Obtain the info that which bits of fields Q and size are actually
2492      available for operand encoding.  Opcodes like FMAXNM and FMLA have
2493      size[1] unavailable.  */
2494   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2495 
2496   /* The index of the operand we are going to tag a qualifier and the qualifer
2497      itself are reasoned from the value of the size and Q fields and the
2498      possible valid qualifier lists.  */
2499   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2500   DEBUG_TRACE ("key idx: %d", idx);
2501 
2502   /* For most related instruciton, size:Q are fully available for operand
2503      encoding.  */
2504   if (mask == 0x7)
2505     {
2506       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2507       if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2508 	return 0;
2509       return 1;
2510     }
2511 
2512   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2513 				   candidates);
2514 #ifdef DEBUG_AARCH64
2515   if (debug_dump)
2516     {
2517       int i;
2518       for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2519 	   && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2520 	DEBUG_TRACE ("qualifier %d: %s", i,
2521 		     aarch64_get_qualifier_name(candidates[i]));
2522       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2523     }
2524 #endif /* DEBUG_AARCH64 */
2525 
2526   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2527 
2528   if (qualifier == AARCH64_OPND_QLF_NIL)
2529     return 0;
2530 
2531   inst->operands[idx].qualifier = qualifier;
2532   return 1;
2533 }
2534 
2535 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2536      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2537 
2538 static int
2539 decode_asimd_fcvt (aarch64_inst *inst)
2540 {
2541   aarch64_field field = {0, 0};
2542   aarch64_insn value;
2543   enum aarch64_opnd_qualifier qualifier;
2544 
2545   gen_sub_field (FLD_size, 0, 1, &field);
2546   value = extract_field_2 (&field, inst->value, 0);
2547   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2548     : AARCH64_OPND_QLF_V_2D;
2549   switch (inst->opcode->op)
2550     {
2551     case OP_FCVTN:
2552     case OP_FCVTN2:
2553       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2554       inst->operands[1].qualifier = qualifier;
2555       break;
2556     case OP_FCVTL:
2557     case OP_FCVTL2:
2558       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
2559       inst->operands[0].qualifier = qualifier;
2560       break;
2561     default:
2562       return 0;
2563     }
2564 
2565   return 1;
2566 }
2567 
2568 /* Decode size[0], i.e. bit 22, for
2569      e.g. FCVTXN <Vb><d>, <Va><n>.  */
2570 
2571 static int
2572 decode_asisd_fcvtxn (aarch64_inst *inst)
2573 {
2574   aarch64_field field = {0, 0};
2575   gen_sub_field (FLD_size, 0, 1, &field);
2576   if (!extract_field_2 (&field, inst->value, 0))
2577     return 0;
2578   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2579   return 1;
2580 }
2581 
2582 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
2583 static int
2584 decode_fcvt (aarch64_inst *inst)
2585 {
2586   enum aarch64_opnd_qualifier qualifier;
2587   aarch64_insn value;
2588   const aarch64_field field = {15, 2};
2589 
2590   /* opc dstsize */
2591   value = extract_field_2 (&field, inst->value, 0);
2592   switch (value)
2593     {
2594     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2595     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2596     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2597     default: return 0;
2598     }
2599   inst->operands[0].qualifier = qualifier;
2600 
2601   return 1;
2602 }
2603 
2604 /* Do miscellaneous decodings that are not common enough to be driven by
2605    flags.  */
2606 
2607 static int
2608 do_misc_decoding (aarch64_inst *inst)
2609 {
2610   unsigned int value;
2611   switch (inst->opcode->op)
2612     {
2613     case OP_FCVT:
2614       return decode_fcvt (inst);
2615 
2616     case OP_FCVTN:
2617     case OP_FCVTN2:
2618     case OP_FCVTL:
2619     case OP_FCVTL2:
2620       return decode_asimd_fcvt (inst);
2621 
2622     case OP_FCVTXN_S:
2623       return decode_asisd_fcvtxn (inst);
2624 
2625     case OP_MOV_P_P:
2626     case OP_MOVS_P_P:
2627       value = extract_field (FLD_SVE_Pn, inst->value, 0);
2628       return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2629 	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2630 
2631     case OP_MOV_Z_P_Z:
2632       return (extract_field (FLD_SVE_Zd, inst->value, 0)
2633 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2634 
2635     case OP_MOV_Z_V:
2636       /* Index must be zero.  */
2637       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2638       return value > 0 && value <= 16 && value == (value & -value);
2639 
2640     case OP_MOV_Z_Z:
2641       return (extract_field (FLD_SVE_Zn, inst->value, 0)
2642 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2643 
2644     case OP_MOV_Z_Zi:
2645       /* Index must be nonzero.  */
2646       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2647       return value > 0 && value != (value & -value);
2648 
2649     case OP_MOVM_P_P_P:
2650       return (extract_field (FLD_SVE_Pd, inst->value, 0)
2651 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2652 
2653     case OP_MOVZS_P_P_P:
2654     case OP_MOVZ_P_P_P:
2655       return (extract_field (FLD_SVE_Pn, inst->value, 0)
2656 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2657 
2658     case OP_NOTS_P_P_P_Z:
2659     case OP_NOT_P_P_P_Z:
2660       return (extract_field (FLD_SVE_Pm, inst->value, 0)
2661 	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2662 
2663     default:
2664       return 0;
2665     }
2666 }
2667 
2668 /* Opcodes that have fields shared by multiple operands are usually flagged
2669    with flags.  In this function, we detect such flags, decode the related
2670    field(s) and store the information in one of the related operands.  The
2671    'one' operand is not any operand but one of the operands that can
2672    accommadate all the information that has been decoded.  */
2673 
2674 static int
2675 do_special_decoding (aarch64_inst *inst)
2676 {
2677   int idx;
2678   aarch64_insn value;
2679   /* Condition for truly conditional executed instructions, e.g. b.cond.  */
2680   if (inst->opcode->flags & F_COND)
2681     {
2682       value = extract_field (FLD_cond2, inst->value, 0);
2683       inst->cond = get_cond_from_value (value);
2684     }
2685   /* 'sf' field.  */
2686   if (inst->opcode->flags & F_SF)
2687     {
2688       idx = select_operand_for_sf_field_coding (inst->opcode);
2689       value = extract_field (FLD_sf, inst->value, 0);
2690       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2691       if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2692 	return 0;
2693       if ((inst->opcode->flags & F_N)
2694 	  && extract_field (FLD_N, inst->value, 0) != value)
2695 	return 0;
2696     }
2697   /* 'sf' field.  */
2698   if (inst->opcode->flags & F_LSE_SZ)
2699     {
2700       idx = select_operand_for_sf_field_coding (inst->opcode);
2701       value = extract_field (FLD_lse_sz, inst->value, 0);
2702       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2703       if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2704 	return 0;
2705     }
2706   /* rcpc3 'size' field.  */
2707   if (inst->opcode->flags & F_RCPC3_SIZE)
2708     {
2709       value = extract_field (FLD_rcpc3_size, inst->value, 0);
2710       for (int i = 0;
2711 	   aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2712 	   i++)
2713 	{
2714 	  if (aarch64_operands[inst->operands[i].type].op_class
2715 	      == AARCH64_OPND_CLASS_INT_REG)
2716 	    {
2717 	      inst->operands[i].qualifier = get_greg_qualifier_from_value (value & 1);
2718 	      if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2719 		return 0;
2720 	    }
2721 	  else if (aarch64_operands[inst->operands[i].type].op_class
2722 	      == AARCH64_OPND_CLASS_FP_REG)
2723 	    {
2724 	      value += (extract_field (FLD_opc1, inst->value, 0) << 2);
2725 	      inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2726 	      if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2727 		return 0;
2728 	    }
2729 	}
2730     }
2731 
2732   /* size:Q fields.  */
2733   if (inst->opcode->flags & F_SIZEQ)
2734     return decode_sizeq (inst);
2735 
2736   if (inst->opcode->flags & F_FPTYPE)
2737     {
2738       idx = select_operand_for_fptype_field_coding (inst->opcode);
2739       value = extract_field (FLD_type, inst->value, 0);
2740       switch (value)
2741 	{
2742 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2743 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2744 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2745 	default: return 0;
2746 	}
2747     }
2748 
2749   if (inst->opcode->flags & F_SSIZE)
2750     {
2751       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2752 	 of the base opcode.  */
2753       aarch64_insn mask;
2754       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2755       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2756       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2757       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2758       /* For most related instruciton, the 'size' field is fully available for
2759 	 operand encoding.  */
2760       if (mask == 0x3)
2761 	{
2762 	  inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2763 	  if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2764 	    return 0;
2765 	}
2766       else
2767 	{
2768 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2769 					   candidates);
2770 	  inst->operands[idx].qualifier
2771 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
2772 	}
2773     }
2774 
2775   if (inst->opcode->flags & F_T)
2776     {
2777       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
2778       int num = 0;
2779       unsigned val, Q;
2780       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2781 	      == AARCH64_OPND_CLASS_SIMD_REG);
2782       /* imm5<3:0>	q	<t>
2783 	 0000		x	reserved
2784 	 xxx1		0	8b
2785 	 xxx1		1	16b
2786 	 xx10		0	4h
2787 	 xx10		1	8h
2788 	 x100		0	2s
2789 	 x100		1	4s
2790 	 1000		0	reserved
2791 	 1000		1	2d  */
2792       val = extract_field (FLD_imm5, inst->value, 0);
2793       while ((val & 0x1) == 0 && ++num <= 3)
2794 	val >>= 1;
2795       if (num > 3)
2796 	return 0;
2797       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2798       inst->operands[0].qualifier =
2799 	get_vreg_qualifier_from_value ((num << 1) | Q);
2800       if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2801 	return 0;
2802 
2803     }
2804 
2805   if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2806     {
2807       unsigned size;
2808       size = (unsigned) extract_field (FLD_size, inst->value,
2809 				       inst->opcode->mask);
2810       inst->operands[0].qualifier
2811 	= get_vreg_qualifier_from_value (1 + (size << 1));
2812       if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2813 	return 0;
2814       inst->operands[2].qualifier = get_sreg_qualifier_from_value (size);
2815       if (inst->operands[2].qualifier == AARCH64_OPND_QLF_ERR)
2816 	return 0;
2817     }
2818 
2819   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2820     {
2821       /* Use Rt to encode in the case of e.g.
2822 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
2823       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2824       if (idx == -1)
2825 	{
2826 	  /* Otherwise use the result operand, which has to be a integer
2827 	     register.  */
2828 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
2829 		  == AARCH64_OPND_CLASS_INT_REG);
2830 	  idx = 0;
2831 	}
2832       assert (idx == 0 || idx == 1);
2833       value = extract_field (FLD_Q, inst->value, 0);
2834       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2835       if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2836 	return 0;
2837     }
2838 
2839   if (inst->opcode->flags & F_LDS_SIZE)
2840     {
2841       aarch64_field field = {0, 0};
2842       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2843 	      == AARCH64_OPND_CLASS_INT_REG);
2844       gen_sub_field (FLD_opc, 0, 1, &field);
2845       value = extract_field_2 (&field, inst->value, 0);
2846       inst->operands[0].qualifier
2847 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2848     }
2849 
2850   /* Miscellaneous decoding; done as the last step.  */
2851   if (inst->opcode->flags & F_MISC)
2852     return do_misc_decoding (inst);
2853 
2854   return 1;
2855 }
2856 
2857 /* Converters converting a real opcode instruction to its alias form.  */
2858 
2859 /* ROR <Wd>, <Ws>, #<shift>
2860      is equivalent to:
2861    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
2862 static int
2863 convert_extr_to_ror (aarch64_inst *inst)
2864 {
2865   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2866     {
2867       copy_operand_info (inst, 2, 3);
2868       inst->operands[3].type = AARCH64_OPND_NIL;
2869       return 1;
2870     }
2871   return 0;
2872 }
2873 
2874 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2875      is equivalent to:
2876    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2877 static int
2878 convert_shll_to_xtl (aarch64_inst *inst)
2879 {
2880   if (inst->operands[2].imm.value == 0)
2881     {
2882       inst->operands[2].type = AARCH64_OPND_NIL;
2883       return 1;
2884     }
2885   return 0;
2886 }
2887 
2888 /* Convert
2889      UBFM <Xd>, <Xn>, #<shift>, #63.
2890    to
2891      LSR <Xd>, <Xn>, #<shift>.  */
2892 static int
2893 convert_bfm_to_sr (aarch64_inst *inst)
2894 {
2895   int64_t imms, val;
2896 
2897   imms = inst->operands[3].imm.value;
2898   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2899   if (imms == val)
2900     {
2901       inst->operands[3].type = AARCH64_OPND_NIL;
2902       return 1;
2903     }
2904 
2905   return 0;
2906 }
2907 
2908 /* Convert MOV to ORR.  */
2909 static int
2910 convert_orr_to_mov (aarch64_inst *inst)
2911 {
2912   /* MOV <Vd>.<T>, <Vn>.<T>
2913      is equivalent to:
2914      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
2915   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2916     {
2917       inst->operands[2].type = AARCH64_OPND_NIL;
2918       return 1;
2919     }
2920   return 0;
2921 }
2922 
2923 /* When <imms> >= <immr>, the instruction written:
2924      SBFX <Xd>, <Xn>, #<lsb>, #<width>
2925    is equivalent to:
2926      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
2927 
2928 static int
2929 convert_bfm_to_bfx (aarch64_inst *inst)
2930 {
2931   int64_t immr, imms;
2932 
2933   immr = inst->operands[2].imm.value;
2934   imms = inst->operands[3].imm.value;
2935   if (imms >= immr)
2936     {
2937       int64_t lsb = immr;
2938       inst->operands[2].imm.value = lsb;
2939       inst->operands[3].imm.value = imms + 1 - lsb;
2940       /* The two opcodes have different qualifiers for
2941 	 the immediate operands; reset to help the checking.  */
2942       reset_operand_qualifier (inst, 2);
2943       reset_operand_qualifier (inst, 3);
2944       return 1;
2945     }
2946 
2947   return 0;
2948 }
2949 
2950 /* When <imms> < <immr>, the instruction written:
2951      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2952    is equivalent to:
2953      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
2954 
2955 static int
2956 convert_bfm_to_bfi (aarch64_inst *inst)
2957 {
2958   int64_t immr, imms, val;
2959 
2960   immr = inst->operands[2].imm.value;
2961   imms = inst->operands[3].imm.value;
2962   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2963   if (imms < immr)
2964     {
2965       inst->operands[2].imm.value = (val - immr) & (val - 1);
2966       inst->operands[3].imm.value = imms + 1;
2967       /* The two opcodes have different qualifiers for
2968 	 the immediate operands; reset to help the checking.  */
2969       reset_operand_qualifier (inst, 2);
2970       reset_operand_qualifier (inst, 3);
2971       return 1;
2972     }
2973 
2974   return 0;
2975 }
2976 
2977 /* The instruction written:
2978      BFC <Xd>, #<lsb>, #<width>
2979    is equivalent to:
2980      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2981 
2982 static int
2983 convert_bfm_to_bfc (aarch64_inst *inst)
2984 {
2985   int64_t immr, imms, val;
2986 
2987   /* Should have been assured by the base opcode value.  */
2988   assert (inst->operands[1].reg.regno == 0x1f);
2989 
2990   immr = inst->operands[2].imm.value;
2991   imms = inst->operands[3].imm.value;
2992   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2993   if (imms < immr)
2994     {
2995       /* Drop XZR from the second operand.  */
2996       copy_operand_info (inst, 1, 2);
2997       copy_operand_info (inst, 2, 3);
2998       inst->operands[3].type = AARCH64_OPND_NIL;
2999 
3000       /* Recalculate the immediates.  */
3001       inst->operands[1].imm.value = (val - immr) & (val - 1);
3002       inst->operands[2].imm.value = imms + 1;
3003 
3004       /* The two opcodes have different qualifiers for the operands; reset to
3005 	 help the checking.  */
3006       reset_operand_qualifier (inst, 1);
3007       reset_operand_qualifier (inst, 2);
3008       reset_operand_qualifier (inst, 3);
3009 
3010       return 1;
3011     }
3012 
3013   return 0;
3014 }
3015 
3016 /* The instruction written:
3017      LSL <Xd>, <Xn>, #<shift>
3018    is equivalent to:
3019      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
3020 
3021 static int
3022 convert_ubfm_to_lsl (aarch64_inst *inst)
3023 {
3024   int64_t immr = inst->operands[2].imm.value;
3025   int64_t imms = inst->operands[3].imm.value;
3026   int64_t val
3027     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
3028 
3029   if ((immr == 0 && imms == val) || immr == imms + 1)
3030     {
3031       inst->operands[3].type = AARCH64_OPND_NIL;
3032       inst->operands[2].imm.value = val - imms;
3033       return 1;
3034     }
3035 
3036   return 0;
3037 }
3038 
3039 /* CINC <Wd>, <Wn>, <cond>
3040      is equivalent to:
3041    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
3042      where <cond> is not AL or NV.  */
3043 
3044 static int
3045 convert_from_csel (aarch64_inst *inst)
3046 {
3047   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
3048       && (inst->operands[3].cond->value & 0xe) != 0xe)
3049     {
3050       copy_operand_info (inst, 2, 3);
3051       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
3052       inst->operands[3].type = AARCH64_OPND_NIL;
3053       return 1;
3054     }
3055   return 0;
3056 }
3057 
3058 /* CSET <Wd>, <cond>
3059      is equivalent to:
3060    CSINC <Wd>, WZR, WZR, invert(<cond>)
3061      where <cond> is not AL or NV.  */
3062 
3063 static int
3064 convert_csinc_to_cset (aarch64_inst *inst)
3065 {
3066   if (inst->operands[1].reg.regno == 0x1f
3067       && inst->operands[2].reg.regno == 0x1f
3068       && (inst->operands[3].cond->value & 0xe) != 0xe)
3069     {
3070       copy_operand_info (inst, 1, 3);
3071       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
3072       inst->operands[3].type = AARCH64_OPND_NIL;
3073       inst->operands[2].type = AARCH64_OPND_NIL;
3074       return 1;
3075     }
3076   return 0;
3077 }
3078 
3079 /* MOV <Wd>, #<imm>
3080      is equivalent to:
3081    MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
3082 
3083    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3084    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3085    or where a MOVN has an immediate that could be encoded by MOVZ, or where
3086    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3087    machine-instruction mnemonic must be used.  */
3088 
3089 static int
3090 convert_movewide_to_mov (aarch64_inst *inst)
3091 {
3092   uint64_t value = inst->operands[1].imm.value;
3093   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
3094   if (value == 0 && inst->operands[1].shifter.amount != 0)
3095     return 0;
3096   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3097   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
3098   value <<= inst->operands[1].shifter.amount;
3099   /* As an alias convertor, it has to be clear that the INST->OPCODE
3100      is the opcode of the real instruction.  */
3101   if (inst->opcode->op == OP_MOVN)
3102     {
3103       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3104       value = ~value;
3105       /* A MOVN has an immediate that could be encoded by MOVZ.  */
3106       if (aarch64_wide_constant_p (value, is32, NULL))
3107 	return 0;
3108     }
3109   inst->operands[1].imm.value = value;
3110   inst->operands[1].shifter.amount = 0;
3111   return 1;
3112 }
3113 
3114 /* MOV <Wd>, #<imm>
3115      is equivalent to:
3116    ORR <Wd>, WZR, #<imm>.
3117 
3118    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3119    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3120    or where a MOVN has an immediate that could be encoded by MOVZ, or where
3121    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3122    machine-instruction mnemonic must be used.  */
3123 
3124 static int
3125 convert_movebitmask_to_mov (aarch64_inst *inst)
3126 {
3127   int is32;
3128   uint64_t value;
3129 
3130   /* Should have been assured by the base opcode value.  */
3131   assert (inst->operands[1].reg.regno == 0x1f);
3132   copy_operand_info (inst, 1, 2);
3133   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3134   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3135   value = inst->operands[1].imm.value;
3136   /* ORR has an immediate that could be generated by a MOVZ or MOVN
3137      instruction.  */
3138   if (inst->operands[0].reg.regno != 0x1f
3139       && (aarch64_wide_constant_p (value, is32, NULL)
3140 	  || aarch64_wide_constant_p (~value, is32, NULL)))
3141     return 0;
3142 
3143   inst->operands[2].type = AARCH64_OPND_NIL;
3144   return 1;
3145 }
3146 
3147 /* Some alias opcodes are disassembled by being converted from their real-form.
3148    N.B. INST->OPCODE is the real opcode rather than the alias.  */
3149 
3150 static int
3151 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
3152 {
3153   switch (alias->op)
3154     {
3155     case OP_ASR_IMM:
3156     case OP_LSR_IMM:
3157       return convert_bfm_to_sr (inst);
3158     case OP_LSL_IMM:
3159       return convert_ubfm_to_lsl (inst);
3160     case OP_CINC:
3161     case OP_CINV:
3162     case OP_CNEG:
3163       return convert_from_csel (inst);
3164     case OP_CSET:
3165     case OP_CSETM:
3166       return convert_csinc_to_cset (inst);
3167     case OP_UBFX:
3168     case OP_BFXIL:
3169     case OP_SBFX:
3170       return convert_bfm_to_bfx (inst);
3171     case OP_SBFIZ:
3172     case OP_BFI:
3173     case OP_UBFIZ:
3174       return convert_bfm_to_bfi (inst);
3175     case OP_BFC:
3176       return convert_bfm_to_bfc (inst);
3177     case OP_MOV_V:
3178       return convert_orr_to_mov (inst);
3179     case OP_MOV_IMM_WIDE:
3180     case OP_MOV_IMM_WIDEN:
3181       return convert_movewide_to_mov (inst);
3182     case OP_MOV_IMM_LOG:
3183       return convert_movebitmask_to_mov (inst);
3184     case OP_ROR_IMM:
3185       return convert_extr_to_ror (inst);
3186     case OP_SXTL:
3187     case OP_SXTL2:
3188     case OP_UXTL:
3189     case OP_UXTL2:
3190       return convert_shll_to_xtl (inst);
3191     default:
3192       return 0;
3193     }
3194 }
3195 
3196 static bool
3197 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
3198 		       aarch64_inst *, int, aarch64_operand_error *errors);
3199 
3200 /* Given the instruction information in *INST, check if the instruction has
3201    any alias form that can be used to represent *INST.  If the answer is yes,
3202    update *INST to be in the form of the determined alias.  */
3203 
3204 /* In the opcode description table, the following flags are used in opcode
3205    entries to help establish the relations between the real and alias opcodes:
3206 
3207 	F_ALIAS:	opcode is an alias
3208 	F_HAS_ALIAS:	opcode has alias(es)
3209 	F_P1
3210 	F_P2
3211 	F_P3:		Disassembly preference priority 1-3 (the larger the
3212 			higher).  If nothing is specified, it is the priority
3213 			0 by default, i.e. the lowest priority.
3214 
3215    Although the relation between the machine and the alias instructions are not
3216    explicitly described, it can be easily determined from the base opcode
3217    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
3218    description entries:
3219 
3220    The mask of an alias opcode must be equal to or a super-set (i.e. more
3221    constrained) of that of the aliased opcode; so is the base opcode value.
3222 
3223    if (opcode_has_alias (real) && alias_opcode_p (opcode)
3224        && (opcode->mask & real->mask) == real->mask
3225        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
3226    then OPCODE is an alias of, and only of, the REAL instruction
3227 
3228    The alias relationship is forced flat-structured to keep related algorithm
3229    simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
3230 
3231    During the disassembling, the decoding decision tree (in
3232    opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
3233    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
3234    not specified), the disassembler will check whether there is any alias
3235    instruction exists for this real instruction.  If there is, the disassembler
3236    will try to disassemble the 32-bit binary again using the alias's rule, or
3237    try to convert the IR to the form of the alias.  In the case of the multiple
3238    aliases, the aliases are tried one by one from the highest priority
3239    (currently the flag F_P3) to the lowest priority (no priority flag), and the
3240    first succeeds first adopted.
3241 
3242    You may ask why there is a need for the conversion of IR from one form to
3243    another in handling certain aliases.  This is because on one hand it avoids
3244    adding more operand code to handle unusual encoding/decoding; on other
3245    hand, during the disassembling, the conversion is an effective approach to
3246    check the condition of an alias (as an alias may be adopted only if certain
3247    conditions are met).
3248 
3249    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
3250    aarch64_opcode_table and generated aarch64_find_alias_opcode and
3251    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
3252 
3253 static void
3254 determine_disassembling_preference (struct aarch64_inst *inst,
3255 				    aarch64_operand_error *errors)
3256 {
3257   const aarch64_opcode *opcode;
3258   const aarch64_opcode *alias;
3259 
3260   opcode = inst->opcode;
3261 
3262   /* This opcode does not have an alias, so use itself.  */
3263   if (!opcode_has_alias (opcode))
3264     return;
3265 
3266   alias = aarch64_find_alias_opcode (opcode);
3267   assert (alias);
3268 
3269 #ifdef DEBUG_AARCH64
3270   if (debug_dump)
3271     {
3272       const aarch64_opcode *tmp = alias;
3273       printf ("####   LIST    orderd: ");
3274       while (tmp)
3275 	{
3276 	  printf ("%s, ", tmp->name);
3277 	  tmp = aarch64_find_next_alias_opcode (tmp);
3278 	}
3279       printf ("\n");
3280     }
3281 #endif /* DEBUG_AARCH64 */
3282 
3283   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
3284     {
3285       DEBUG_TRACE ("try %s", alias->name);
3286       assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
3287 
3288       /* An alias can be a pseudo opcode which will never be used in the
3289 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
3290 	 aliasing AND.  */
3291       if (pseudo_opcode_p (alias))
3292 	{
3293 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
3294 	  continue;
3295 	}
3296 
3297       if ((inst->value & alias->mask) != alias->opcode)
3298 	{
3299 	  DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3300 	  continue;
3301 	}
3302 
3303       if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3304 	{
3305 	  DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3306 	  continue;
3307 	}
3308 
3309       /* No need to do any complicated transformation on operands, if the alias
3310 	 opcode does not have any operand.  */
3311       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3312 	{
3313 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3314 	  aarch64_replace_opcode (inst, alias);
3315 	  return;
3316 	}
3317       if (alias->flags & F_CONV)
3318 	{
3319 	  aarch64_inst copy;
3320 	  memcpy (&copy, inst, sizeof (aarch64_inst));
3321 	  /* ALIAS is the preference as long as the instruction can be
3322 	     successfully converted to the form of ALIAS.  */
3323 	  if (convert_to_alias (&copy, alias) == 1)
3324 	    {
3325 	      aarch64_replace_opcode (&copy, alias);
3326 	      if (aarch64_match_operands_constraint (&copy, NULL) != 1)
3327 		{
3328 		  DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3329 		}
3330 	      else
3331 		{
3332 		  DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3333 		  memcpy (inst, &copy, sizeof (aarch64_inst));
3334 		}
3335 	      return;
3336 	    }
3337 	}
3338       else
3339 	{
3340 	  /* Directly decode the alias opcode.  */
3341 	  aarch64_inst temp;
3342 	  memset (&temp, '\0', sizeof (aarch64_inst));
3343 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3344 	    {
3345 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3346 	      memcpy (inst, &temp, sizeof (aarch64_inst));
3347 	      return;
3348 	    }
3349 	}
3350     }
3351 }
3352 
3353 /* Some instructions (including all SVE ones) use the instruction class
3354    to describe how a qualifiers_list index is represented in the instruction
3355    encoding.  If INST is such an instruction, decode the appropriate fields
3356    and fill in the operand qualifiers accordingly.  Return true if no
3357    problems are found.  */
3358 
3359 static bool
3360 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
3361 {
3362   int i, variant;
3363 
3364   variant = 0;
3365   switch (inst->opcode->iclass)
3366     {
3367     case sme_mov:
3368       variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
3369       if (variant >= 4 && variant < 7)
3370 	return false;
3371       if (variant == 7)
3372 	variant = 4;
3373       break;
3374 
3375     case sme_psel:
3376       i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
3377       if (i == 0)
3378 	return false;
3379       while ((i & 1) == 0)
3380 	{
3381 	  i >>= 1;
3382 	  variant += 1;
3383 	}
3384       break;
3385 
3386     case sme_shift:
3387       i = extract_field (FLD_SVE_tszh, inst->value, 0);
3388       goto sve_shift;
3389 
3390     case sme_size_12_bhs:
3391       variant = extract_field (FLD_SME_size_12, inst->value, 0);
3392       if (variant >= 3)
3393 	return false;
3394       break;
3395 
3396     case sme_size_12_hs:
3397       variant = extract_field (FLD_SME_size_12, inst->value, 0);
3398       if (variant != 1 && variant != 2)
3399 	return false;
3400       variant -= 1;
3401       break;
3402 
3403     case sme_size_22:
3404       variant = extract_field (FLD_SME_size_22, inst->value, 0);
3405       break;
3406 
3407     case sme_size_22_hsd:
3408       variant = extract_field (FLD_SME_size_22, inst->value, 0);
3409       if (variant < 1)
3410 	return false;
3411       variant -= 1;
3412       break;
3413 
3414     case sme_sz_23:
3415       variant = extract_field (FLD_SME_sz_23, inst->value, 0);
3416       break;
3417 
3418     case sve_cpy:
3419       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
3420       break;
3421 
3422     case sve_index:
3423       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
3424       if ((i & 31) == 0)
3425 	return false;
3426       while ((i & 1) == 0)
3427 	{
3428 	  i >>= 1;
3429 	  variant += 1;
3430 	}
3431       break;
3432 
3433     case sve_index1:
3434       i = extract_fields (inst->value, 0, 2, FLD_SVE_tsz, FLD_SVE_i2h);
3435       if ((i & 15) == 0)
3436 	return false;
3437       while ((i & 1) == 0)
3438 	{
3439 	  i >>= 1;
3440 	  variant += 1;
3441 	}
3442       break;
3443 
3444     case sve_limm:
3445       /* Pick the smallest applicable element size.  */
3446       if ((inst->value & 0x20600) == 0x600)
3447 	variant = 0;
3448       else if ((inst->value & 0x20400) == 0x400)
3449 	variant = 1;
3450       else if ((inst->value & 0x20000) == 0)
3451 	variant = 2;
3452       else
3453 	variant = 3;
3454       break;
3455 
3456     case sme2_mov:
3457       /* .D is preferred over the other sizes in disassembly.  */
3458       variant = 3;
3459       break;
3460 
3461     case sme2_movaz:
3462     case sme_misc:
3463     case sve_misc:
3464       /* These instructions have only a single variant.  */
3465       break;
3466 
3467     case sve_movprfx:
3468       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
3469       break;
3470 
3471     case sve_pred_zm:
3472       variant = extract_field (FLD_SVE_M_4, inst->value, 0);
3473       break;
3474 
3475     case sve_shift_pred:
3476       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
3477     sve_shift:
3478       if (i == 0)
3479 	return false;
3480       while (i != 1)
3481 	{
3482 	  i >>= 1;
3483 	  variant += 1;
3484 	}
3485       break;
3486 
3487     case sve_shift_unpred:
3488       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3489       goto sve_shift;
3490 
3491     case sve_size_bhs:
3492       variant = extract_field (FLD_size, inst->value, 0);
3493       if (variant >= 3)
3494 	return false;
3495       break;
3496 
3497     case sve_size_bhsd:
3498       variant = extract_field (FLD_size, inst->value, 0);
3499       break;
3500 
3501     case sve_size_hsd:
3502       i = extract_field (FLD_size, inst->value, 0);
3503       if (i < 1)
3504 	return false;
3505       variant = i - 1;
3506       break;
3507 
3508     case sme_fp_sd:
3509     case sme_int_sd:
3510     case sve_size_bh:
3511     case sve_size_sd:
3512       variant = extract_field (FLD_SVE_sz, inst->value, 0);
3513       break;
3514 
3515     case sve_size_sd2:
3516       variant = extract_field (FLD_SVE_sz2, inst->value, 0);
3517       break;
3518 
3519     case sve_size_hsd2:
3520       i = extract_field (FLD_SVE_size, inst->value, 0);
3521       if (i < 1)
3522 	return false;
3523       variant = i - 1;
3524       break;
3525 
3526     case sve_size_13:
3527       /* Ignore low bit of this field since that is set in the opcode for
3528 	 instructions of this iclass.  */
3529       i = (extract_field (FLD_size, inst->value, 0) & 2);
3530       variant = (i >> 1);
3531       break;
3532 
3533     case sve_shift_tsz_bhsd:
3534       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3535       if (i == 0)
3536 	return false;
3537       while (i != 1)
3538 	{
3539 	  i >>= 1;
3540 	  variant += 1;
3541 	}
3542       break;
3543 
3544     case sve_size_tsz_bhs:
3545       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3546       if (i == 0)
3547 	return false;
3548       while (i != 1)
3549 	{
3550 	  if (i & 1)
3551 	    return false;
3552 	  i >>= 1;
3553 	  variant += 1;
3554 	}
3555       break;
3556 
3557     case sve_shift_tsz_hsd:
3558       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3559       if (i == 0)
3560 	return false;
3561       while (i != 1)
3562 	{
3563 	  i >>= 1;
3564 	  variant += 1;
3565 	}
3566       break;
3567 
3568     default:
3569       /* No mapping between instruction class and qualifiers.  */
3570       return true;
3571     }
3572 
3573   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3574     inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
3575   return true;
3576 }
3577 /* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
3578    fails, which meanes that CODE is not an instruction of OPCODE; otherwise
3579    return 1.
3580 
3581    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3582    determined and used to disassemble CODE; this is done just before the
3583    return.  */
3584 
3585 static bool
3586 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
3587 		       aarch64_inst *inst, int noaliases_p,
3588 		       aarch64_operand_error *errors)
3589 {
3590   int i;
3591 
3592   DEBUG_TRACE ("enter with %s", opcode->name);
3593 
3594   assert (opcode && inst);
3595 
3596   /* Clear inst.  */
3597   memset (inst, '\0', sizeof (aarch64_inst));
3598 
3599   /* Check the base opcode.  */
3600   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
3601     {
3602       DEBUG_TRACE ("base opcode match FAIL");
3603       goto decode_fail;
3604     }
3605 
3606   inst->opcode = opcode;
3607   inst->value = code;
3608 
3609   /* Assign operand codes and indexes.  */
3610   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3611     {
3612       if (opcode->operands[i] == AARCH64_OPND_NIL)
3613 	break;
3614       inst->operands[i].type = opcode->operands[i];
3615       inst->operands[i].idx = i;
3616     }
3617 
3618   /* Call the opcode decoder indicated by flags.  */
3619   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
3620     {
3621       DEBUG_TRACE ("opcode flag-based decoder FAIL");
3622       goto decode_fail;
3623     }
3624 
3625   /* Possibly use the instruction class to determine the correct
3626      qualifier.  */
3627   if (!aarch64_decode_variant_using_iclass (inst))
3628     {
3629       DEBUG_TRACE ("iclass-based decoder FAIL");
3630       goto decode_fail;
3631     }
3632 
3633   /* Call operand decoders.  */
3634   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3635     {
3636       const aarch64_operand *opnd;
3637       enum aarch64_opnd type;
3638 
3639       type = opcode->operands[i];
3640       if (type == AARCH64_OPND_NIL)
3641 	break;
3642       opnd = &aarch64_operands[type];
3643       if (operand_has_extractor (opnd)
3644 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
3645 					 errors)))
3646 	{
3647 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
3648 	  goto decode_fail;
3649 	}
3650     }
3651 
3652   /* If the opcode has a verifier, then check it now.  */
3653   if (opcode->verifier
3654       && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
3655     {
3656       DEBUG_TRACE ("operand verifier FAIL");
3657       goto decode_fail;
3658     }
3659 
3660   /* Match the qualifiers.  */
3661   if (aarch64_match_operands_constraint (inst, NULL) == 1)
3662     {
3663       /* Arriving here, the CODE has been determined as a valid instruction
3664 	 of OPCODE and *INST has been filled with information of this OPCODE
3665 	 instruction.  Before the return, check if the instruction has any
3666 	 alias and should be disassembled in the form of its alias instead.
3667 	 If the answer is yes, *INST will be updated.  */
3668       if (!noaliases_p)
3669 	determine_disassembling_preference (inst, errors);
3670       DEBUG_TRACE ("SUCCESS");
3671       return true;
3672     }
3673   else
3674     {
3675       DEBUG_TRACE ("constraint matching FAIL");
3676     }
3677 
3678  decode_fail:
3679   return false;
3680 }
3681 
3682 /* This does some user-friendly fix-up to *INST.  It is currently focus on
3683    the adjustment of qualifiers to help the printed instruction
3684    recognized/understood more easily.  */
3685 
3686 static void
3687 user_friendly_fixup (aarch64_inst *inst)
3688 {
3689   switch (inst->opcode->iclass)
3690     {
3691     case testbranch:
3692       /* TBNZ Xn|Wn, #uimm6, label
3693 	 Test and Branch Not Zero: conditionally jumps to label if bit number
3694 	 uimm6 in register Xn is not zero.  The bit number implies the width of
3695 	 the register, which may be written and should be disassembled as Wn if
3696 	 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3697 	 */
3698       if (inst->operands[1].imm.value < 32)
3699 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3700       break;
3701     default: break;
3702     }
3703 }
3704 
3705 /* Decode INSN and fill in *INST the instruction information.  An alias
3706    opcode may be filled in *INSN if NOALIASES_P is FALSE.  Return zero on
3707    success.  */
3708 
3709 enum err_type
3710 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3711 		     bool noaliases_p,
3712 		     aarch64_operand_error *errors)
3713 {
3714   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3715 
3716 #ifdef DEBUG_AARCH64
3717   if (debug_dump)
3718     {
3719       const aarch64_opcode *tmp = opcode;
3720       printf ("\n");
3721       DEBUG_TRACE ("opcode lookup:");
3722       while (tmp != NULL)
3723 	{
3724 	  aarch64_verbose ("  %s", tmp->name);
3725 	  tmp = aarch64_find_next_opcode (tmp);
3726 	}
3727     }
3728 #endif /* DEBUG_AARCH64 */
3729 
3730   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3731      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3732      opcode field and value, apart from the difference that one of them has an
3733      extra field as part of the opcode, but such a field is used for operand
3734      encoding in other opcode(s) ('immh' in the case of the example).  */
3735   while (opcode != NULL)
3736     {
3737       /* But only one opcode can be decoded successfully for, as the
3738 	 decoding routine will check the constraint carefully.  */
3739       if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3740 	return ERR_OK;
3741       opcode = aarch64_find_next_opcode (opcode);
3742     }
3743 
3744   return ERR_UND;
3745 }
3746 
3747 /* Return a short string to indicate a switch to STYLE.  These strings
3748    will be embedded into the disassembled operand text (as produced by
3749    aarch64_print_operand), and then spotted in the print_operands function
3750    so that the disassembler output can be split by style.  */
3751 
3752 static const char *
3753 get_style_text (enum disassembler_style style)
3754 {
3755   static bool init = false;
3756   static char formats[16][4];
3757   unsigned num;
3758 
3759   /* First time through we build a string for every possible format.  This
3760      code relies on there being no more than 16 different styles (there's
3761      an assert below for this).  */
3762   if (!init)
3763     {
3764       int i;
3765 
3766       for (i = 0; i <= 0xf; ++i)
3767 	{
3768 	  int res ATTRIBUTE_UNUSED
3769 	    = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3770 			STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3771 	  assert (res == 3);
3772 	}
3773 
3774       init = true;
3775     }
3776 
3777   /* Return the string that marks switching to STYLE.  */
3778   num = (unsigned) style;
3779   assert (style <= 0xf);
3780   return formats[num];
3781 }
3782 
3783 /* Callback used by aarch64_print_operand to apply STYLE to the
3784    disassembler output created from FMT and ARGS.  The STYLER object holds
3785    any required state.  Must return a pointer to a string (created from FMT
3786    and ARGS) that will continue to be valid until the complete disassembled
3787    instruction has been printed.
3788 
3789    We return a string that includes two embedded style markers, the first,
3790    places at the start of the string, indicates a switch to STYLE, and the
3791    second, placed at the end of the string, indicates a switch back to the
3792    default text style.
3793 
3794    Later, when we print the operand text we take care to collapse any
3795    adjacent style markers, and to ignore any style markers that appear at
3796    the very end of a complete operand string.  */
3797 
3798 static const char *aarch64_apply_style (struct aarch64_styler *styler,
3799 					enum disassembler_style style,
3800 					const char *fmt,
3801 					va_list args)
3802 {
3803   int res;
3804   char *ptr, *tmp;
3805   struct obstack *stack = (struct obstack *) styler->state;
3806   va_list ap;
3807 
3808   /* These are the two strings for switching styles.  */
3809   const char *style_on = get_style_text (style);
3810   const char *style_off = get_style_text (dis_style_text);
3811 
3812   /* Calculate space needed once FMT and ARGS are expanded.  */
3813   va_copy (ap, args);
3814   res = vsnprintf (NULL, 0, fmt, ap);
3815   va_end (ap);
3816   assert (res >= 0);
3817 
3818   /* Allocate space on the obstack for the expanded FMT and ARGS, as well
3819      as the two strings for switching styles, then write all of these
3820      strings onto the obstack.  */
3821   ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
3822 				+ strlen (style_off) + 1);
3823   tmp = stpcpy (ptr, style_on);
3824   res = vsnprintf (tmp, (res + 1), fmt, args);
3825   assert (res >= 0);
3826   tmp += res;
3827   strcpy (tmp, style_off);
3828 
3829   return ptr;
3830 }
3831 
3832 /* Print operands.  */
3833 
3834 static void
3835 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3836 		const aarch64_opnd_info *opnds, struct disassemble_info *info,
3837 		bool *has_notes)
3838 {
3839   char *notes = NULL;
3840   int i, pcrel_p, num_printed;
3841   struct aarch64_styler styler;
3842   struct obstack content;
3843   obstack_init (&content);
3844 
3845   styler.apply_style = aarch64_apply_style;
3846   styler.state = (void *) &content;
3847 
3848   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3849     {
3850       char str[128];
3851       char cmt[128];
3852 
3853       /* We regard the opcode operand info more, however we also look into
3854 	 the inst->operands to support the disassembling of the optional
3855 	 operand.
3856 	 The two operand code should be the same in all cases, apart from
3857 	 when the operand can be optional.  */
3858       if (opcode->operands[i] == AARCH64_OPND_NIL
3859 	  || opnds[i].type == AARCH64_OPND_NIL)
3860 	break;
3861 
3862       /* Generate the operand string in STR.  */
3863       aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3864 			     &info->target, &notes, cmt, sizeof (cmt),
3865 			     arch_variant, &styler);
3866 
3867       /* Print the delimiter (taking account of omitted operand(s)).  */
3868       if (str[0] != '\0')
3869 	(*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
3870 				      num_printed++ == 0 ? "\t" : ", ");
3871 
3872       /* Print the operand.  */
3873       if (pcrel_p)
3874 	(*info->print_address_func) (info->target, info);
3875       else
3876 	{
3877 	  /* This operand came from aarch64_print_operand, and will include
3878 	     embedded strings indicating which style each character should
3879 	     have.  In the following code we split the text based on
3880 	     CURR_STYLE, and call the styled print callback to print each
3881 	     block of text in the appropriate style.  */
3882 	  char *start, *curr;
3883 	  enum disassembler_style curr_style = dis_style_text;
3884 
3885 	  start = curr = str;
3886 	  do
3887 	    {
3888 	      if (*curr == '\0'
3889 		  || (*curr == STYLE_MARKER_CHAR
3890 		      && ISXDIGIT (*(curr + 1))
3891 		      && *(curr + 2) == STYLE_MARKER_CHAR))
3892 		{
3893 		  /* Output content between our START position and CURR.  */
3894 		  int len = curr - start;
3895 		  if (len > 0)
3896 		    {
3897 		      if ((*info->fprintf_styled_func) (info->stream,
3898 							curr_style,
3899 							"%.*s",
3900 							len, start) < 0)
3901 			break;
3902 		    }
3903 
3904 		  if (*curr == '\0')
3905 		    break;
3906 
3907 		  /* Skip over the initial STYLE_MARKER_CHAR.  */
3908 		  ++curr;
3909 
3910 		  /* Update the CURR_STYLE.  As there are less than 16
3911 		     styles, it is possible, that if the input is corrupted
3912 		     in some way, that we might set CURR_STYLE to an
3913 		     invalid value.  Don't worry though, we check for this
3914 		     situation.  */
3915 		  if (*curr >= '0' && *curr <= '9')
3916 		    curr_style = (enum disassembler_style) (*curr - '0');
3917 		  else if (*curr >= 'a' && *curr <= 'f')
3918 		    curr_style = (enum disassembler_style) (*curr - 'a' + 10);
3919 		  else
3920 		    curr_style = dis_style_text;
3921 
3922 		  /* Check for an invalid style having been selected.  This
3923 		     should never happen, but it doesn't hurt to be a
3924 		     little paranoid.  */
3925 		  if (curr_style > dis_style_comment_start)
3926 		    curr_style = dis_style_text;
3927 
3928 		  /* Skip the hex character, and the closing STYLE_MARKER_CHAR.  */
3929 		  curr += 2;
3930 
3931 		  /* Reset the START to after the style marker.  */
3932 		  start = curr;
3933 		}
3934 	      else
3935 		++curr;
3936 	    }
3937 	  while (true);
3938 	}
3939 
3940       /* Print the comment.  This works because only the last operand ever
3941 	 adds a comment.  If that ever changes then we'll need to be
3942 	 smarter here.  */
3943       if (cmt[0] != '\0')
3944 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3945 				      "\t// %s", cmt);
3946     }
3947 
3948     if (notes && !no_notes)
3949       {
3950 	*has_notes = true;
3951 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3952 				      "  // note: %s", notes);
3953       }
3954 
3955     obstack_free (&content, NULL);
3956 }
3957 
3958 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed.  */
3959 
3960 static void
3961 remove_dot_suffix (char *name, const aarch64_inst *inst)
3962 {
3963   char *ptr;
3964   size_t len;
3965 
3966   ptr = strchr (inst->opcode->name, '.');
3967   assert (ptr && inst->cond);
3968   len = ptr - inst->opcode->name;
3969   assert (len < 8);
3970   strncpy (name, inst->opcode->name, len);
3971   name[len] = '\0';
3972 }
3973 
3974 /* Print the instruction mnemonic name.  */
3975 
3976 static void
3977 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3978 {
3979   if (inst->opcode->flags & F_COND)
3980     {
3981       /* For instructions that are truly conditionally executed, e.g. b.cond,
3982 	 prepare the full mnemonic name with the corresponding condition
3983 	 suffix.  */
3984       char name[8];
3985 
3986       remove_dot_suffix (name, inst);
3987       (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3988 				    "%s.%s", name, inst->cond->names[0]);
3989     }
3990   else
3991     (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3992 				  "%s", inst->opcode->name);
3993 }
3994 
3995 /* Decide whether we need to print a comment after the operands of
3996    instruction INST.  */
3997 
3998 static void
3999 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
4000 {
4001   if (inst->opcode->flags & F_COND)
4002     {
4003       char name[8];
4004       unsigned int i, num_conds;
4005 
4006       remove_dot_suffix (name, inst);
4007       num_conds = ARRAY_SIZE (inst->cond->names);
4008       for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
4009 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4010 				      "%s %s.%s",
4011 				      i == 1 ? "  //" : ",",
4012 				      name, inst->cond->names[i]);
4013     }
4014 }
4015 
4016 /* Build notes from verifiers into a string for printing.  */
4017 
4018 static void
4019 print_verifier_notes (aarch64_operand_error *detail,
4020 		      struct disassemble_info *info)
4021 {
4022   if (no_notes)
4023     return;
4024 
4025   /* The output of the verifier cannot be a fatal error, otherwise the assembly
4026      would not have succeeded.  We can safely ignore these.  */
4027   assert (detail->non_fatal);
4028 
4029   (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4030 				"  // note: ");
4031   switch (detail->kind)
4032     {
4033     case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
4034       (*info->fprintf_styled_func) (info->stream, dis_style_text,
4035 				    _("this `%s' should have an immediately"
4036 				      " preceding `%s'"),
4037 				    detail->data[0].s, detail->data[1].s);
4038       break;
4039 
4040     case AARCH64_OPDE_EXPECTED_A_AFTER_B:
4041       (*info->fprintf_styled_func) (info->stream, dis_style_text,
4042 				    _("expected `%s' after previous `%s'"),
4043 				    detail->data[0].s, detail->data[1].s);
4044       break;
4045 
4046     default:
4047       assert (detail->error);
4048       (*info->fprintf_styled_func) (info->stream, dis_style_text,
4049 				    "%s", detail->error);
4050       if (detail->index >= 0)
4051 	(*info->fprintf_styled_func) (info->stream, dis_style_text,
4052 				      " at operand %d", detail->index + 1);
4053       break;
4054     }
4055 }
4056 
4057 /* Print the instruction according to *INST.  */
4058 
4059 static void
4060 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
4061 		    const aarch64_insn code,
4062 		    struct disassemble_info *info,
4063 		    aarch64_operand_error *mismatch_details)
4064 {
4065   bool has_notes = false;
4066 
4067   print_mnemonic_name (inst, info);
4068   print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
4069   print_comment (inst, info);
4070 
4071   /* We've already printed a note, not enough space to print more so exit.
4072      Usually notes shouldn't overlap so it shouldn't happen that we have a note
4073      from a register and instruction at the same time.  */
4074   if (has_notes)
4075     return;
4076 
4077   /* Always run constraint verifiers, this is needed because constraints need to
4078      maintain a global state regardless of whether the instruction has the flag
4079      set or not.  */
4080   enum err_type result = verify_constraints (inst, code, pc, false,
4081 					     mismatch_details, &insn_sequence);
4082   switch (result)
4083     {
4084     case ERR_VFI:
4085       print_verifier_notes (mismatch_details, info);
4086       break;
4087     case ERR_UND:
4088     case ERR_UNP:
4089     case ERR_NYI:
4090     default:
4091       break;
4092     }
4093 }
4094 
4095 /* Entry-point of the instruction disassembler and printer.  */
4096 
4097 static void
4098 print_insn_aarch64_word (bfd_vma pc,
4099 			 uint32_t word,
4100 			 struct disassemble_info *info,
4101 			 aarch64_operand_error *errors)
4102 {
4103   static const char *err_msg[ERR_NR_ENTRIES+1] =
4104     {
4105       [ERR_OK]  = "_",
4106       [ERR_UND] = "undefined",
4107       [ERR_UNP] = "unpredictable",
4108       [ERR_NYI] = "NYI"
4109     };
4110 
4111   enum err_type ret;
4112   aarch64_inst inst;
4113 
4114   info->insn_info_valid = 1;
4115   info->branch_delay_insns = 0;
4116   info->data_size = 0;
4117   info->target = 0;
4118   info->target2 = 0;
4119 
4120   if (info->flags & INSN_HAS_RELOC)
4121     /* If the instruction has a reloc associated with it, then
4122        the offset field in the instruction will actually be the
4123        addend for the reloc.  (If we are using REL type relocs).
4124        In such cases, we can ignore the pc when computing
4125        addresses, since the addend is not currently pc-relative.  */
4126     pc = 0;
4127 
4128   ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
4129 
4130   if (((word >> 21) & 0x3ff) == 1)
4131     {
4132       /* RESERVED for ALES.  */
4133       assert (ret != ERR_OK);
4134       ret = ERR_NYI;
4135     }
4136 
4137   switch (ret)
4138     {
4139     case ERR_UND:
4140     case ERR_UNP:
4141     case ERR_NYI:
4142       /* Handle undefined instructions.  */
4143       info->insn_type = dis_noninsn;
4144       (*info->fprintf_styled_func) (info->stream,
4145 				    dis_style_assembler_directive,
4146 				    ".inst\t");
4147       (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
4148 				    "0x%08x", word);
4149       (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4150 				    " ; %s", err_msg[ret]);
4151       break;
4152     case ERR_OK:
4153       user_friendly_fixup (&inst);
4154       if (inst.opcode->iclass == condbranch
4155 	  || inst.opcode->iclass == testbranch
4156 	  || inst.opcode->iclass == compbranch)
4157         info->insn_type = dis_condbranch;
4158       else if (inst.opcode->iclass == branch_imm)
4159         info->insn_type = dis_jsr;
4160       print_aarch64_insn (pc, &inst, word, info, errors);
4161       break;
4162     default:
4163       abort ();
4164     }
4165 }
4166 
4167 /* Disallow mapping symbols ($x, $d etc) from
4168    being displayed in symbol relative addresses.  */
4169 
4170 bool
4171 aarch64_symbol_is_valid (asymbol * sym,
4172 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
4173 {
4174   const char * name;
4175 
4176   if (sym == NULL)
4177     return false;
4178 
4179   name = bfd_asymbol_name (sym);
4180 
4181   return name
4182     && (name[0] != '$'
4183 	|| (name[1] != 'x' && name[1] != 'd')
4184 	|| (name[2] != '\0' && name[2] != '.'));
4185 }
4186 
4187 /* Print data bytes on INFO->STREAM.  */
4188 
4189 static void
4190 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
4191 		 uint32_t word,
4192 		 struct disassemble_info *info,
4193 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4194 {
4195   switch (info->bytes_per_chunk)
4196     {
4197     case 1:
4198       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4199 				 ".byte\t");
4200       info->fprintf_styled_func (info->stream, dis_style_immediate,
4201 				 "0x%02x", word);
4202       break;
4203     case 2:
4204       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4205 				 ".short\t");
4206       info->fprintf_styled_func (info->stream, dis_style_immediate,
4207 				 "0x%04x", word);
4208       break;
4209     case 4:
4210       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4211 				 ".word\t");
4212       info->fprintf_styled_func (info->stream, dis_style_immediate,
4213 				 "0x%08x", word);
4214       break;
4215     default:
4216       abort ();
4217     }
4218 }
4219 
4220 /* Try to infer the code or data type from a symbol.
4221    Returns nonzero if *MAP_TYPE was set.  */
4222 
4223 static int
4224 get_sym_code_type (struct disassemble_info *info, int n,
4225 		   enum map_type *map_type)
4226 {
4227   asymbol * as;
4228   elf_symbol_type *es;
4229   unsigned int type;
4230   const char *name;
4231 
4232   /* If the symbol is in a different section, ignore it.  */
4233   if (info->section != NULL && info->section != info->symtab[n]->section)
4234     return false;
4235 
4236   if (n >= info->symtab_size)
4237     return false;
4238 
4239   as = info->symtab[n];
4240   if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
4241     return false;
4242   es = (elf_symbol_type *) as;
4243 
4244   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
4245 
4246   /* If the symbol has function type then use that.  */
4247   if (type == STT_FUNC)
4248     {
4249       *map_type = MAP_INSN;
4250       return true;
4251     }
4252 
4253   /* Check for mapping symbols.  */
4254   name = bfd_asymbol_name(info->symtab[n]);
4255   if (name[0] == '$'
4256       && (name[1] == 'x' || name[1] == 'd')
4257       && (name[2] == '\0' || name[2] == '.'))
4258     {
4259       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
4260       return true;
4261     }
4262 
4263   return false;
4264 }
4265 
4266 /* Set the feature bits in arch_variant in order to get the correct disassembly
4267    for the chosen architecture variant.
4268 
4269    Currently we only restrict disassembly for Armv8-R and otherwise enable all
4270    non-R-profile features.  */
4271 static void
4272 select_aarch64_variant (unsigned mach)
4273 {
4274   switch (mach)
4275     {
4276     case bfd_mach_aarch64_8R:
4277       AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
4278       break;
4279     default:
4280       arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
4281       AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
4282     }
4283 }
4284 
4285 /* Entry-point of the AArch64 disassembler.  */
4286 
4287 int
4288 print_insn_aarch64 (bfd_vma pc,
4289 		    struct disassemble_info *info)
4290 {
4291   bfd_byte	buffer[INSNLEN];
4292   int		status;
4293   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *,
4294 			    aarch64_operand_error *);
4295   bool   found = false;
4296   unsigned int	size = 4;
4297   unsigned long	data;
4298   aarch64_operand_error errors;
4299   static bool set_features;
4300 
4301   if (info->disassembler_options)
4302     {
4303       set_default_aarch64_dis_options (info);
4304 
4305       parse_aarch64_dis_options (info->disassembler_options);
4306 
4307       /* To avoid repeated parsing of these options, we remove them here.  */
4308       info->disassembler_options = NULL;
4309     }
4310 
4311   if (!set_features)
4312     {
4313       select_aarch64_variant (info->mach);
4314       set_features = true;
4315     }
4316 
4317   /* Aarch64 instructions are always little-endian */
4318   info->endian_code = BFD_ENDIAN_LITTLE;
4319 
4320   /* Default to DATA.  A text section is required by the ABI to contain an
4321      INSN mapping symbol at the start.  A data section has no such
4322      requirement, hence if no mapping symbol is found the section must
4323      contain only data.  This however isn't very useful if the user has
4324      fully stripped the binaries.  If this is the case use the section
4325      attributes to determine the default.  If we have no section default to
4326      INSN as well, as we may be disassembling some raw bytes on a baremetal
4327      HEX file or similar.  */
4328   enum map_type type = MAP_DATA;
4329   if ((info->section && info->section->flags & SEC_CODE) || !info->section)
4330     type = MAP_INSN;
4331 
4332   /* First check the full symtab for a mapping symbol, even if there
4333      are no usable non-mapping symbols for this address.  */
4334   if (info->symtab_size != 0
4335       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
4336     {
4337       int last_sym = -1;
4338       bfd_vma addr, section_vma = 0;
4339       bool can_use_search_opt_p;
4340       int n;
4341 
4342       if (pc <= last_mapping_addr)
4343 	last_mapping_sym = -1;
4344 
4345       /* Start scanning at the start of the function, or wherever
4346 	 we finished last time.  */
4347       n = info->symtab_pos + 1;
4348 
4349       /* If the last stop offset is different from the current one it means we
4350 	 are disassembling a different glob of bytes.  As such the optimization
4351 	 would not be safe and we should start over.  */
4352       can_use_search_opt_p = last_mapping_sym >= 0
4353 			     && info->stop_offset == last_stop_offset;
4354 
4355       if (n >= last_mapping_sym && can_use_search_opt_p)
4356 	n = last_mapping_sym;
4357 
4358       /* Look down while we haven't passed the location being disassembled.
4359 	 The reason for this is that there's no defined order between a symbol
4360 	 and an mapping symbol that may be at the same address.  We may have to
4361 	 look at least one position ahead.  */
4362       for (; n < info->symtab_size; n++)
4363 	{
4364 	  addr = bfd_asymbol_value (info->symtab[n]);
4365 	  if (addr > pc)
4366 	    break;
4367 	  if (get_sym_code_type (info, n, &type))
4368 	    {
4369 	      last_sym = n;
4370 	      found = true;
4371 	    }
4372 	}
4373 
4374       if (!found)
4375 	{
4376 	  n = info->symtab_pos;
4377 	  if (n >= last_mapping_sym && can_use_search_opt_p)
4378 	    n = last_mapping_sym;
4379 
4380 	  /* No mapping symbol found at this address.  Look backwards
4381 	     for a preceeding one, but don't go pass the section start
4382 	     otherwise a data section with no mapping symbol can pick up
4383 	     a text mapping symbol of a preceeding section.  The documentation
4384 	     says section can be NULL, in which case we will seek up all the
4385 	     way to the top.  */
4386 	  if (info->section)
4387 	    section_vma = info->section->vma;
4388 
4389 	  for (; n >= 0; n--)
4390 	    {
4391 	      addr = bfd_asymbol_value (info->symtab[n]);
4392 	      if (addr < section_vma)
4393 		break;
4394 
4395 	      if (get_sym_code_type (info, n, &type))
4396 		{
4397 		  last_sym = n;
4398 		  found = true;
4399 		  break;
4400 		}
4401 	    }
4402 	}
4403 
4404       last_mapping_sym = last_sym;
4405       last_type = type;
4406       last_stop_offset = info->stop_offset;
4407 
4408       /* Look a little bit ahead to see if we should print out
4409 	 less than four bytes of data.  If there's a symbol,
4410 	 mapping or otherwise, after two bytes then don't
4411 	 print more.  */
4412       if (last_type == MAP_DATA)
4413 	{
4414 	  size = 4 - (pc & 3);
4415 	  for (n = last_sym + 1; n < info->symtab_size; n++)
4416 	    {
4417 	      addr = bfd_asymbol_value (info->symtab[n]);
4418 	      if (addr > pc)
4419 		{
4420 		  if (addr - pc < size)
4421 		    size = addr - pc;
4422 		  break;
4423 		}
4424 	    }
4425 	  /* If the next symbol is after three bytes, we need to
4426 	     print only part of the data, so that we can use either
4427 	     .byte or .short.  */
4428 	  if (size == 3)
4429 	    size = (pc & 1) ? 1 : 2;
4430 	}
4431     }
4432   else
4433     last_type = type;
4434 
4435   /* PR 10263: Disassemble data if requested to do so by the user.  */
4436   if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
4437     {
4438       /* size was set above.  */
4439       info->bytes_per_chunk = size;
4440       info->display_endian = info->endian;
4441       printer = print_insn_data;
4442     }
4443   else
4444     {
4445       info->bytes_per_chunk = size = INSNLEN;
4446       info->display_endian = info->endian_code;
4447       printer = print_insn_aarch64_word;
4448     }
4449 
4450   status = (*info->read_memory_func) (pc, buffer, size, info);
4451   if (status != 0)
4452     {
4453       (*info->memory_error_func) (status, pc, info);
4454       return -1;
4455     }
4456 
4457   data = bfd_get_bits (buffer, size * 8,
4458 		       info->display_endian == BFD_ENDIAN_BIG);
4459 
4460   (*printer) (pc, data, info, &errors);
4461 
4462   return size;
4463 }
4464 
4465 void
4466 print_aarch64_disassembler_options (FILE *stream)
4467 {
4468   fprintf (stream, _("\n\
4469 The following AARCH64 specific disassembler options are supported for use\n\
4470 with the -M switch (multiple options should be separated by commas):\n"));
4471 
4472   fprintf (stream, _("\n\
4473   no-aliases         Don't print instruction aliases.\n"));
4474 
4475   fprintf (stream, _("\n\
4476   aliases            Do print instruction aliases.\n"));
4477 
4478   fprintf (stream, _("\n\
4479   no-notes         Don't print instruction notes.\n"));
4480 
4481   fprintf (stream, _("\n\
4482   notes            Do print instruction notes.\n"));
4483 
4484 #ifdef DEBUG_AARCH64
4485   fprintf (stream, _("\n\
4486   debug_dump         Temp switch for debug trace.\n"));
4487 #endif /* DEBUG_AARCH64 */
4488 
4489   fprintf (stream, _("\n"));
4490 }
4491