1 /* aarch64-dis.c -- AArch64 disassembler.
2    Copyright 2009, 2010, 2011, 2012, 2013  Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 
28 #if !defined(EMBEDDED_ENV)
29 #define SYMTAB_AVAILABLE 1
30 #include "elf-bfd.h"
31 #include "elf/aarch64.h"
32 #endif
33 
34 #define ERR_OK   0
35 #define ERR_UND -1
36 #define ERR_UNP -3
37 #define ERR_NYI -5
38 
39 #define INSNLEN 4
40 
41 /* Cached mapping symbol state.  */
42 enum map_type
43 {
44   MAP_INSN,
45   MAP_DATA
46 };
47 
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_mapping_addr = 0;
51 
52 /* Other options */
53 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
54 
55 
56 static void
57 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
58 {
59 }
60 
61 static void
62 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
63 {
64   /* Try to match options that are simple flags */
65   if (CONST_STRNEQ (option, "no-aliases"))
66     {
67       no_aliases = 1;
68       return;
69     }
70 
71   if (CONST_STRNEQ (option, "aliases"))
72     {
73       no_aliases = 0;
74       return;
75     }
76 
77 #ifdef DEBUG_AARCH64
78   if (CONST_STRNEQ (option, "debug_dump"))
79     {
80       debug_dump = 1;
81       return;
82     }
83 #endif /* DEBUG_AARCH64 */
84 
85   /* Invalid option.  */
86   fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
87 }
88 
89 static void
90 parse_aarch64_dis_options (const char *options)
91 {
92   const char *option_end;
93 
94   if (options == NULL)
95     return;
96 
97   while (*options != '\0')
98     {
99       /* Skip empty options.  */
100       if (*options == ',')
101 	{
102 	  options++;
103 	  continue;
104 	}
105 
106       /* We know that *options is neither NUL nor a comma.  */
107       option_end = options + 1;
108       while (*option_end != ',' && *option_end != '\0')
109 	option_end++;
110 
111       parse_aarch64_dis_option (options, option_end - options);
112 
113       /* Go on to the next one.  If option_end points to a comma, it
114 	 will be skipped above.  */
115       options = option_end;
116     }
117 }
118 
119 /* Functions doing the instruction disassembling.  */
120 
121 /* The unnamed arguments consist of the number of fields and information about
122    these fields, from which VALUE will be extracted from CODE and returned.
123    MASK can be zero or the base mask of the opcode.
124 
125    N.B. the fields are required to be in such an order that the most significant
126    field for VALUE comes first, e.g. the <index> in
127     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
128    is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
129    the order H, L, M.  */
130 
131 static inline aarch64_insn
132 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
133 {
134   uint32_t num;
135   const aarch64_field *field;
136   enum aarch64_field_kind kind;
137   va_list va;
138   va_start (va, mask);
139   num = va_arg (va, uint32_t);
140   assert (num <= 5);
141   aarch64_insn value = 0x0;
142   while (num--)
143     {
144       kind = va_arg (va, enum aarch64_field_kind);
145       field = &fields[kind];
146       value <<= field->width;
147       value |= extract_field (kind, code, mask);
148     }
149   return value;
150 }
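
/* Illustrative sketch only (not part of the original source): a caller that
   needs the H:L:M lane index described above would pass the fields most
   significant first, as below; example_hlm_index is a hypothetical name.  */
#if 0
static aarch64_insn
example_hlm_index (aarch64_insn code)
{
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif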
151 
152 /* Sign-extend bit I of VALUE.  */
153 static inline int32_t
154 sign_extend (aarch64_insn value, unsigned i)
155 {
156   uint32_t ret = value;
157   assert (i < 32);
158   if ((value >> i) & 0x1)
159     {
160       uint32_t val = (uint32_t)(-1) << i;
161       ret = ret | val;
162     }
163   return (int32_t) ret;
164 }
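
/* For instance (illustrative note, not from the original source):
   sign_extend (0x1f0, 8) treats 0x1f0 as a 9-bit two's-complement value;
   bit 8 is set, so the bits above it are filled with ones and the result
   is 0xfffffff0, i.e. -16.  */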
165 
166 /* N.B. the following inline helper functions create a dependency on the
167    order of operand qualifier enumerators.  */
168 
169 /* Given VALUE, return qualifier for a general purpose register.  */
170 static inline enum aarch64_opnd_qualifier
171 get_greg_qualifier_from_value (aarch64_insn value)
172 {
173   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
174   assert (value <= 0x1
175 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
176   return qualifier;
177 }
178 
179 /* Given VALUE, return qualifier for a vector register.  */
180 static inline enum aarch64_opnd_qualifier
181 get_vreg_qualifier_from_value (aarch64_insn value)
182 {
183   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
184   assert (value <= 0x8
185 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
186   return qualifier;
187 }
188 
189 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
190 static inline enum aarch64_opnd_qualifier
191 get_sreg_qualifier_from_value (aarch64_insn value)
192 {
193   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
194   assert (value <= 0x4
195 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
196   return qualifier;
197 }
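
/* For example (illustrative note, not from the original source):
   get_greg_qualifier_from_value maps 0/1 to the W/X qualifiers, and
   get_sreg_qualifier_from_value maps 0/1/2/3 to the B/H/S/D scalar
   qualifiers, assuming the enumerator order implied by the imm5 and
   size:Q encoding tables further down in this file.  */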
198 
199 /* Given the instruction in *INST, which is probably halfway through the
200    decoding, work out the qualifier that our caller expects for operand
201    I.  Return such a qualifier if we can establish it; otherwise return
202    AARCH64_OPND_QLF_NIL.  */
203 
204 static aarch64_opnd_qualifier_t
205 get_expected_qualifier (const aarch64_inst *inst, int i)
206 {
207   aarch64_opnd_qualifier_seq_t qualifiers;
208   /* Should not be called if the qualifier is known.  */
209   assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
210   if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
211 			       i, qualifiers))
212     return qualifiers[i];
213   else
214     return AARCH64_OPND_QLF_NIL;
215 }
216 
217 /* Operand extractors.  */
218 
219 int
220 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
221 		   const aarch64_insn code,
222 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
223 {
224   info->reg.regno = extract_field (self->fields[0], code, 0);
225   return 1;
226 }
227 
228 /* e.g. IC <ic_op>{, <Xt>}.  */
229 int
230 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
231 			  const aarch64_insn code,
232 			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
233 {
234   info->reg.regno = extract_field (self->fields[0], code, 0);
235   assert (info->idx == 1
236 	  && (aarch64_get_operand_class (inst->operands[0].type)
237 	      == AARCH64_OPND_CLASS_SYSTEM));
238   /* This will make the constraint checking happy and more importantly will
239      help the disassembler determine whether this operand is optional or
240      not.  */
241   info->present = inst->operands[0].sysins_op->has_xt;
242 
243   return 1;
244 }
245 
246 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
247 int
248 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
249 		     const aarch64_insn code,
250 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
251 {
252   /* regno */
253   info->reglane.regno = extract_field (self->fields[0], code,
254 				       inst->opcode->mask);
255 
256   /* index and/or type */
257   if (inst->opcode->iclass == asisdone
258     || inst->opcode->iclass == asimdins)
259     {
260       if (info->type == AARCH64_OPND_En
261 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
262 	{
263 	  unsigned shift;
264 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
265 	  assert (info->idx == 1);	/* Vn */
266 	  aarch64_insn value = extract_field (FLD_imm4, code, 0);
267 	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
268 	  info->qualifier = get_expected_qualifier (inst, info->idx);
269 	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
270 	  info->reglane.index = value >> shift;
271 	}
272       else
273 	{
274 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
275 	     imm5<3:0>	<V>
276 	     0000	RESERVED
277 	     xxx1	B
278 	     xx10	H
279 	     x100	S
280 	     1000	D  */
281 	  int pos = -1;
282 	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
283 	  while (++pos <= 3 && (value & 0x1) == 0)
284 	    value >>= 1;
285 	  if (pos > 3)
286 	    return 0;
287 	  info->qualifier = get_sreg_qualifier_from_value (pos);
288 	  info->reglane.index = (unsigned) (value >> 1);
289 	}
290     }
291   else
292     {
293       /* index only for e.g.
294          SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
295 
296       /* Need information in other operand(s) to help decoding.  */
297       info->qualifier = get_expected_qualifier (inst, info->idx);
298       switch (info->qualifier)
299 	{
300 	case AARCH64_OPND_QLF_S_H:
301 	  /* h:l:m */
302 	  info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
303 						FLD_M);
304 	  info->reglane.regno &= 0xf;
305 	  break;
306 	case AARCH64_OPND_QLF_S_S:
307 	  /* h:l */
308 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
309 	  break;
310 	case AARCH64_OPND_QLF_S_D:
311 	  /* H */
312 	  info->reglane.index = extract_field (FLD_H, code, 0);
313 	  break;
314 	default:
315 	  return 0;
316 	}
317     }
318 
319   return 1;
320 }
321 
322 int
323 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
324 		     const aarch64_insn code,
325 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
326 {
327   /* R */
328   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
329   /* len */
330   info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
331   return 1;
332 }
333 
334 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
335 int
336 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
337 			  aarch64_opnd_info *info, const aarch64_insn code,
338 			  const aarch64_inst *inst)
339 {
340   aarch64_insn value;
341   /* Number of elements in each structure to be loaded/stored.  */
342   unsigned expected_num = get_opcode_dependent_value (inst->opcode);
343 
344   struct
345     {
346       unsigned is_reserved;
347       unsigned num_regs;
348       unsigned num_elements;
349     } data [] =
350   {   {0, 4, 4},
351       {1, 4, 4},
352       {0, 4, 1},
353       {0, 4, 2},
354       {0, 3, 3},
355       {1, 3, 3},
356       {0, 3, 1},
357       {0, 1, 1},
358       {0, 2, 2},
359       {1, 2, 2},
360       {0, 2, 1},
361   };
362 
363   /* Rt */
364   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
365   /* opcode */
366   value = extract_field (FLD_opcode, code, 0);
367   if (expected_num != data[value].num_elements || data[value].is_reserved)
368     return 0;
369   info->reglist.num_regs = data[value].num_regs;
370 
371   return 1;
372 }
373 
374 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
375    lanes instructions.  */
376 int
377 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
378 			    aarch64_opnd_info *info, const aarch64_insn code,
379 			    const aarch64_inst *inst)
380 {
381   aarch64_insn value;
382 
383   /* Rt */
384   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
385   /* S */
386   value = extract_field (FLD_S, code, 0);
387 
388   /* Number of registers is equal to the number of elements in
389      each structure to be loaded/stored.  */
390   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
391   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
392 
393   /* Except when it is LD1R.  */
394   if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
395     info->reglist.num_regs = 2;
396 
397   return 1;
398 }
399 
400 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
401    load/store single element instructions.  */
402 int
403 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
404 			   aarch64_opnd_info *info, const aarch64_insn code,
405 			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
406 {
407   aarch64_field field = {0, 0};
408   aarch64_insn QSsize;		/* fields Q:S:size.  */
409   aarch64_insn opcodeh2;	/* opcode<2:1> */
410 
411   /* Rt */
412   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
413 
414   /* Decode the index, opcode<2:1> and size.  */
415   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
416   opcodeh2 = extract_field_2 (&field, code, 0);
417   QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
418   switch (opcodeh2)
419     {
420     case 0x0:
421       info->qualifier = AARCH64_OPND_QLF_S_B;
422       /* Index encoded in "Q:S:size".  */
423       info->reglist.index = QSsize;
424       break;
425     case 0x1:
426       info->qualifier = AARCH64_OPND_QLF_S_H;
427       /* Index encoded in "Q:S:size<1>".  */
428       info->reglist.index = QSsize >> 1;
429       break;
430     case 0x2:
431       if ((QSsize & 0x1) == 0)
432 	{
433 	  info->qualifier = AARCH64_OPND_QLF_S_S;
434 	  /* Index encoded in "Q:S".  */
435 	  info->reglist.index = QSsize >> 2;
436 	}
437       else
438 	{
439 	  info->qualifier = AARCH64_OPND_QLF_S_D;
440 	  /* Index encoded in "Q".  */
441 	  info->reglist.index = QSsize >> 3;
442 	  if (extract_field (FLD_S, code, 0))
443 	    /* UND */
444 	    return 0;
445 	}
446       break;
447     default:
448       return 0;
449     }
450 
451   info->reglist.has_index = 1;
452   info->reglist.num_regs = 0;
453   /* Number of registers is equal to the number of elements in
454      each structure to be loaded/stored.  */
455   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
456   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
457 
458   return 1;
459 }
460 
461 /* Decode fields immh:immb and/or Q for e.g.
462    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
463    or SSHR <V><d>, <V><n>, #<shift>.  */
464 
465 int
466 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
467 			       aarch64_opnd_info *info, const aarch64_insn code,
468 			       const aarch64_inst *inst)
469 {
470   int pos;
471   aarch64_insn Q, imm, immh;
472   enum aarch64_insn_class iclass = inst->opcode->iclass;
473 
474   immh = extract_field (FLD_immh, code, 0);
475   if (immh == 0)
476     return 0;
477   imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
478   pos = 4;
479   /* Get highest set bit in immh.  */
480   while (--pos >= 0 && (immh & 0x8) == 0)
481     immh <<= 1;
482 
483   assert ((iclass == asimdshf || iclass == asisdshf)
484 	  && (info->type == AARCH64_OPND_IMM_VLSR
485 	      || info->type == AARCH64_OPND_IMM_VLSL));
486 
487   if (iclass == asimdshf)
488     {
489       Q = extract_field (FLD_Q, code, 0);
490       /* immh	Q	<T>
491 	 0000	x	SEE AdvSIMD modified immediate
492 	 0001	0	8B
493 	 0001	1	16B
494 	 001x	0	4H
495 	 001x	1	8H
496 	 01xx	0	2S
497 	 01xx	1	4S
498 	 1xxx	0	RESERVED
499 	 1xxx	1	2D  */
500       info->qualifier =
501 	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
502     }
503   else
504     info->qualifier = get_sreg_qualifier_from_value (pos);
505 
506   if (info->type == AARCH64_OPND_IMM_VLSR)
507     /* immh	<shift>
508        0000	SEE AdvSIMD modified immediate
509        0001	(16-UInt(immh:immb))
510        001x	(32-UInt(immh:immb))
511        01xx	(64-UInt(immh:immb))
512        1xxx	(128-UInt(immh:immb))  */
513     info->imm.value = (16 << pos) - imm;
514   else
515     /* immh:immb
516        immh	<shift>
517        0000	SEE AdvSIMD modified immediate
518        0001	(UInt(immh:immb)-8)
519        001x	(UInt(immh:immb)-16)
520        01xx	(UInt(immh:immb)-32)
521        1xxx	(UInt(immh:immb)-64)  */
522     info->imm.value = imm - (8 << pos);
523 
524   return 1;
525 }
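
/* Worked example (illustrative note, not from the original source):
   for a right-shift form with immh:immb = 0010:110 and Q = 0, the loop
   above leaves pos = 1 (immh is of the form 001x), the qualifier becomes
   4H and the shift is 32 - 22 = 10, i.e. SSHR <Vd>.4H, <Vn>.4H, #10.  */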
526 
527 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
528 int
529 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
530 		      aarch64_opnd_info *info, const aarch64_insn code,
531 		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
532 {
533   int64_t imm;
534   aarch64_insn val;
535   val = extract_field (FLD_size, code, 0);
536   switch (val)
537     {
538     case 0: imm = 8; break;
539     case 1: imm = 16; break;
540     case 2: imm = 32; break;
541     default: return 0;
542     }
543   info->imm.value = imm;
544   return 1;
545 }
546 
547 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
548    The value in the field(s) will be extracted as an unsigned immediate value.  */
549 int
550 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
551 		 const aarch64_insn code,
552 		 const aarch64_inst *inst ATTRIBUTE_UNUSED)
553 {
554   int64_t imm;
555   /* Maximum of two fields to extract.  */
556   assert (self->fields[2] == FLD_NIL);
557 
558   if (self->fields[1] == FLD_NIL)
559     imm = extract_field (self->fields[0], code, 0);
560   else
561     /* e.g. TBZ b5:b40.  */
562     imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
563 
564   if (info->type == AARCH64_OPND_FPIMM)
565     info->imm.is_fp = 1;
566 
567   if (operand_need_sign_extension (self))
568     imm = sign_extend (imm, get_operand_fields_width (self) - 1);
569 
570   if (operand_need_shift_by_two (self))
571     imm <<= 2;
572 
573   if (info->type == AARCH64_OPND_ADDR_ADRP)
574     imm <<= 12;
575 
576   info->imm.value = imm;
577   return 1;
578 }
579 
580 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
581 int
582 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
583 		      const aarch64_insn code,
584 		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
585 {
586   aarch64_ext_imm (self, info, code, inst);
587   info->shifter.kind = AARCH64_MOD_LSL;
588   info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
589   return 1;
590 }
591 
592 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
593      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
594 int
595 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
596 				  aarch64_opnd_info *info,
597 				  const aarch64_insn code,
598 				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
599 {
600   uint64_t imm;
601   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
602   aarch64_field field = {0, 0};
603 
604   assert (info->idx == 1);
605 
606   if (info->type == AARCH64_OPND_SIMD_FPIMM)
607     info->imm.is_fp = 1;
608 
609   /* a:b:c:d:e:f:g:h */
610   imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
611   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
612     {
613       /* Either MOVI <Dd>, #<imm>
614 	 or     MOVI <Vd>.2D, #<imm>.
615 	 <imm> is a 64-bit immediate
616 	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
617 	 encoded in "a:b:c:d:e:f:g:h".	*/
618       int i;
619       unsigned abcdefgh = imm;
620       for (imm = 0ull, i = 0; i < 8; i++)
621 	if (((abcdefgh >> i) & 0x1) != 0)
622 	  imm |= 0xffull << (8 * i);
623     }
624   info->imm.value = imm;
625 
626   /* cmode */
627   info->qualifier = get_expected_qualifier (inst, info->idx);
628   switch (info->qualifier)
629     {
630     case AARCH64_OPND_QLF_NIL:
631       /* no shift */
632       info->shifter.kind = AARCH64_MOD_NONE;
633       return 1;
634     case AARCH64_OPND_QLF_LSL:
635       /* shift zeros */
636       info->shifter.kind = AARCH64_MOD_LSL;
637       switch (aarch64_get_qualifier_esize (opnd0_qualifier))
638 	{
639 	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
640 	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
641 	default: assert (0); return 0;
642 	}
643       /* 00: 0; 01: 8; 10:16; 11:24.  */
644       info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
645       break;
646     case AARCH64_OPND_QLF_MSL:
647       /* shift ones */
648       info->shifter.kind = AARCH64_MOD_MSL;
649       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
650       info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
651       break;
652     default:
653       assert (0);
654       return 0;
655     }
656 
657   return 1;
658 }
659 
660 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
661 int
662 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
663 		   aarch64_opnd_info *info, const aarch64_insn code,
664 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
665 {
666   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
667   return 1;
668 }
669 
670 /* Decode arithmetic immediate for e.g.
671      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
672 int
673 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
674 		  aarch64_opnd_info *info, const aarch64_insn code,
675 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
676 {
677   aarch64_insn value;
678 
679   info->shifter.kind = AARCH64_MOD_LSL;
680   /* shift */
681   value = extract_field (FLD_shift, code, 0);
682   if (value >= 2)
683     return 0;
684   info->shifter.amount = value ? 12 : 0;
685   /* imm12 (unsigned) */
686   info->imm.value = extract_field (FLD_imm12, code, 0);
687 
688   return 1;
689 }
690 
691 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
692 
693 int
694 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
695 		  aarch64_opnd_info *info, const aarch64_insn code,
696 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
697 {
698   uint64_t imm, mask;
699   uint32_t sf;
700   uint32_t N, R, S;
701   unsigned simd_size;
702   aarch64_insn value;
703 
704   value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
705   assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
706 	  || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
707   sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
708 
709   /* value is N:immr:imms.  */
710   S = value & 0x3f;
711   R = (value >> 6) & 0x3f;
712   N = (value >> 12) & 0x1;
713 
714   if (sf == 0 && N == 1)
715     return 0;
716 
717   /* The immediate value is S+1 bits set to 1, left-rotated by SIMDsize - R
718      (in other words, right-rotated by R), then replicated.  */
719   if (N != 0)
720     {
721       simd_size = 64;
722       mask = 0xffffffffffffffffull;
723     }
724   else
725     {
726       switch (S)
727 	{
728 	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
729 	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
730 	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
731 	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
732 	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
733 	default: return 0;
734 	}
735       mask = (1ull << simd_size) - 1;
736       /* Top bits are IGNORED.  */
737       R &= simd_size - 1;
738     }
739   /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
740   if (S == simd_size - 1)
741     return 0;
742   /* S+1 consecutive bits to 1.  */
743   /* NOTE: S can't be 63 due to detection above.  */
744   imm = (1ull << (S + 1)) - 1;
745   /* Rotate to the left by simd_size - R.  */
746   if (R != 0)
747     imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
748   /* Replicate the value according to SIMD size.  */
749   switch (simd_size)
750     {
751     case  2: imm = (imm <<  2) | imm;
752     case  4: imm = (imm <<  4) | imm;
753     case  8: imm = (imm <<  8) | imm;
754     case 16: imm = (imm << 16) | imm;
755     case 32: imm = (imm << 32) | imm;
756     case 64: break;
757     default: assert (0); return 0;
758     }
759 
760   info->imm.value = sf ? imm : imm & 0xffffffff;
761 
762   return 1;
763 }
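
/* Worked example (illustrative note, not from the original source):
   N:immr:imms = 0:000000:111100 selects simd_size == 2 with S == 0 and
   R == 0, giving the 2-bit element 0b01; replication then produces
   0x5555555555555555 (masked to 0x55555555 for a W-register form).  */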
764 
765 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
766    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
767 int
768 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
769 		aarch64_opnd_info *info,
770 		const aarch64_insn code, const aarch64_inst *inst)
771 {
772   aarch64_insn value;
773 
774   /* Rt */
775   info->reg.regno = extract_field (FLD_Rt, code, 0);
776 
777   /* size */
778   value = extract_field (FLD_ldst_size, code, 0);
779   if (inst->opcode->iclass == ldstpair_indexed
780       || inst->opcode->iclass == ldstnapair_offs
781       || inst->opcode->iclass == ldstpair_off
782       || inst->opcode->iclass == loadlit)
783     {
784       enum aarch64_opnd_qualifier qualifier;
785       switch (value)
786 	{
787 	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
788 	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
789 	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
790 	default: return 0;
791 	}
792       info->qualifier = qualifier;
793     }
794   else
795     {
796       /* opc1:size */
797       value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
798       if (value > 0x4)
799 	return 0;
800       info->qualifier = get_sreg_qualifier_from_value (value);
801     }
802 
803   return 1;
804 }
805 
806 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
807 int
808 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
809 			 aarch64_opnd_info *info,
810 			 aarch64_insn code,
811 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
812 {
813   /* Rn */
814   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
815   return 1;
816 }
817 
818 /* Decode the address operand for e.g.
819      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
820 int
821 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
822 			 aarch64_opnd_info *info,
823 			 aarch64_insn code, const aarch64_inst *inst)
824 {
825   aarch64_insn S, value;
826 
827   /* Rn */
828   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
829   /* Rm */
830   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
831   /* option */
832   value = extract_field (FLD_option, code, 0);
833   info->shifter.kind =
834     aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
835   /* Fix-up the shifter kind; although the table-driven approach is
836      efficient, it is slightly inflexible, thus needing this fix-up.  */
837   if (info->shifter.kind == AARCH64_MOD_UXTX)
838     info->shifter.kind = AARCH64_MOD_LSL;
839   /* S */
840   S = extract_field (FLD_S, code, 0);
841   if (S == 0)
842     {
843       info->shifter.amount = 0;
844       info->shifter.amount_present = 0;
845     }
846   else
847     {
848       int size;
849       /* Need information in other operand(s) to help achieve the decoding
850 	 from 'S' field.  */
851       info->qualifier = get_expected_qualifier (inst, info->idx);
852       /* Get the size of the data element that is accessed, which may be
853 	 different from that of the source register size, e.g. in strb/ldrb.  */
854       size = aarch64_get_qualifier_esize (info->qualifier);
855       info->shifter.amount = get_logsz (size);
856       info->shifter.amount_present = 1;
857     }
858 
859   return 1;
860 }
861 
862 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
863 int
864 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
865 		       aarch64_insn code, const aarch64_inst *inst)
866 {
867   aarch64_insn imm;
868   info->qualifier = get_expected_qualifier (inst, info->idx);
869 
870   /* Rn */
871   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
872   /* simm (imm9 or imm7)  */
873   imm = extract_field (self->fields[0], code, 0);
874   info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
875   if (self->fields[0] == FLD_imm7)
876     /* scaled immediate in ld/st pair instructions.  */
877     info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
878   /* qualifier */
879   if (inst->opcode->iclass == ldst_unscaled
880       || inst->opcode->iclass == ldstnapair_offs
881       || inst->opcode->iclass == ldstpair_off
882       || inst->opcode->iclass == ldst_unpriv)
883     info->addr.writeback = 0;
884   else
885     {
886       /* pre/post- index */
887       info->addr.writeback = 1;
888       if (extract_field (self->fields[1], code, 0) == 1)
889 	info->addr.preind = 1;
890       else
891 	info->addr.postind = 1;
892     }
893 
894   return 1;
895 }
896 
897 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
898 int
899 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
900 			 aarch64_insn code,
901 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
902 {
903   int shift;
904   info->qualifier = get_expected_qualifier (inst, info->idx);
905   shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
906   /* Rn */
907   info->addr.base_regno = extract_field (self->fields[0], code, 0);
908   /* uimm12 */
909   info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
910   return 1;
911 }
912 
913 /* Decode the address operand for e.g.
914      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
915 int
916 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
917 			    aarch64_opnd_info *info,
918 			    aarch64_insn code, const aarch64_inst *inst)
919 {
920   /* The opcode dependent area stores the number of elements in
921      each structure to be loaded/stored.  */
922   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
923 
924   /* Rn */
925   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
926   /* Rm | #<amount>  */
927   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
928   if (info->addr.offset.regno == 31)
929     {
930       if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
931 	/* Special handling of loading a single structure to all lanes.  */
932 	info->addr.offset.imm = (is_ld1r ? 1
933 				 : inst->operands[0].reglist.num_regs)
934 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
935       else
936 	info->addr.offset.imm = inst->operands[0].reglist.num_regs
937 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
938 	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
939     }
940   else
941     info->addr.offset.is_reg = 1;
942   info->addr.writeback = 1;
943 
944   return 1;
945 }
946 
947 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
948 int
949 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
950 		  aarch64_opnd_info *info,
951 		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
952 {
953   aarch64_insn value;
954   /* cond */
955   value = extract_field (FLD_cond, code, 0);
956   info->cond = get_cond_from_value (value);
957   return 1;
958 }
959 
960 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
961 int
962 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
963 		    aarch64_opnd_info *info,
964 		    aarch64_insn code,
965 		    const aarch64_inst *inst ATTRIBUTE_UNUSED)
966 {
967   /* op0:op1:CRn:CRm:op2 */
968   info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
969 				 FLD_CRm, FLD_op2);
970   return 1;
971 }
972 
973 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
974 int
975 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
976 			 aarch64_opnd_info *info, aarch64_insn code,
977 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
978 {
979   int i;
980   /* op1:op2 */
981   info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
982   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
983     if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
984       return 1;
985   /* Reserved value in <pstatefield>.  */
986   return 0;
987 }
988 
989 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
990 int
991 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
992 		       aarch64_opnd_info *info,
993 		       aarch64_insn code,
994 		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
995 {
996   int i;
997   aarch64_insn value;
998   const aarch64_sys_ins_reg *sysins_ops;
999   /* op0:op1:CRn:CRm:op2 */
1000   value = extract_fields (code, 0, 5,
1001 			  FLD_op0, FLD_op1, FLD_CRn,
1002 			  FLD_CRm, FLD_op2);
1003 
1004   switch (info->type)
1005     {
1006     case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1007     case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1008     case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1009     case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1010     default: assert (0); return 0;
1011     }
1012 
1013   for (i = 0; sysins_ops[i].template != NULL; ++i)
1014     if (sysins_ops[i].value == value)
1015       {
1016 	info->sysins_op = sysins_ops + i;
1017 	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1018 		     info->sysins_op->template,
1019 		     (unsigned)info->sysins_op->value,
1020 		     info->sysins_op->has_xt, i);
1021 	return 1;
1022       }
1023 
1024   return 0;
1025 }
1026 
1027 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
1028 
1029 int
1030 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1031 		     aarch64_opnd_info *info,
1032 		     aarch64_insn code,
1033 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
1034 {
1035   /* CRm */
1036   info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1037   return 1;
1038 }
1039 
1040 /* Decode the prefetch operation option operand for e.g.
1041      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
1042 
1043 int
1044 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1045 		   aarch64_opnd_info *info,
1046 		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1047 {
1048   /* prfop in Rt */
1049   info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1050   return 1;
1051 }
1052 
1053 /* Decode the extended register operand for e.g.
1054      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1055 int
1056 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1057 			  aarch64_opnd_info *info,
1058 			  aarch64_insn code,
1059 			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
1060 {
1061   aarch64_insn value;
1062 
1063   /* Rm */
1064   info->reg.regno = extract_field (FLD_Rm, code, 0);
1065   /* option */
1066   value = extract_field (FLD_option, code, 0);
1067   info->shifter.kind =
1068     aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1069   /* imm3 */
1070   info->shifter.amount = extract_field (FLD_imm3, code,  0);
1071 
1072   /* This makes the constraint checking happy.  */
1073   info->shifter.operator_present = 1;
1074 
1075   /* Assume inst->operands[0].qualifier has been resolved.  */
1076   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1077   info->qualifier = AARCH64_OPND_QLF_W;
1078   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1079       && (info->shifter.kind == AARCH64_MOD_UXTX
1080 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1081     info->qualifier = AARCH64_OPND_QLF_X;
1082 
1083   return 1;
1084 }
1085 
1086 /* Decode the shifted register operand for e.g.
1087      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1088 int
1089 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1090 			 aarch64_opnd_info *info,
1091 			 aarch64_insn code,
1092 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1093 {
1094   aarch64_insn value;
1095 
1096   /* Rm */
1097   info->reg.regno = extract_field (FLD_Rm, code, 0);
1098   /* shift */
1099   value = extract_field (FLD_shift, code, 0);
1100   info->shifter.kind =
1101     aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1102   if (info->shifter.kind == AARCH64_MOD_ROR
1103       && inst->opcode->iclass != log_shift)
1104     /* ROR is not available for the shifted register operand in arithmetic
1105        instructions.  */
1106     return 0;
1107   /* imm6 */
1108   info->shifter.amount = extract_field (FLD_imm6, code,  0);
1109 
1110   /* This makes the constraint checking happy.  */
1111   info->shifter.operator_present = 1;
1112 
1113   return 1;
1114 }
1115 
1116 /* Bitfields that are commonly used to encode certain operands' information
1117    may be partially used as part of the base opcode in some instructions.
1118    For example, bit 1 of the field 'size' in
1119      FCVTXN <Vb><d>, <Va><n>
1120    is actually part of the base opcode, while only size<0> is available
1121    for encoding the register type.  Another example is the AdvSIMD
1122    instruction ORR (register), in which the field 'size' is also used for
1123    the base opcode, leaving only the field 'Q' available to encode the
1124    vector register arrangement specifier '8B' or '16B'.
1125 
1126    This function tries to deduce the qualifier from the value of partially
1127    constrained field(s).  Given the VALUE of such a field or fields, the
1128    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1129    operand encoding), the function returns the matching qualifier or
1130    AARCH64_OPND_QLF_NIL if nothing matches.
1131 
1132    N.B. CANDIDATES is a group of possible qualifiers that are valid for
1133    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1134    may end with AARCH64_OPND_QLF_NIL.  */
1135 
1136 static enum aarch64_opnd_qualifier
1137 get_qualifier_from_partial_encoding (aarch64_insn value,
1138 				     const enum aarch64_opnd_qualifier* \
1139 				     candidates,
1140 				     aarch64_insn mask)
1141 {
1142   int i;
1143   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1144   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1145     {
1146       aarch64_insn standard_value;
1147       if (candidates[i] == AARCH64_OPND_QLF_NIL)
1148 	break;
1149       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1150       if ((standard_value & mask) == (value & mask))
1151 	return candidates[i];
1152     }
1153   return AARCH64_OPND_QLF_NIL;
1154 }
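
/* For example (illustrative note, not from the original source): for the
   AdvSIMD ORR (register) case described above, only Q is free, so MASK
   covers just the Q bit; with candidates { 8B, 16B } and Q == 1, 16B is
   the candidate whose standard value matches under the mask and is
   therefore returned.  */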
1155 
1156 /* Given a list of qualifier sequences, return all possible valid qualifiers
1157    for operand IDX in QUALIFIERS.
1158    Assume QUALIFIERS is an array whose length is large enough.  */
1159 
1160 static void
1161 get_operand_possible_qualifiers (int idx,
1162 				 const aarch64_opnd_qualifier_seq_t *list,
1163 				 enum aarch64_opnd_qualifier *qualifiers)
1164 {
1165   int i;
1166   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1167     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1168       break;
1169 }
1170 
1171 /* Decode the size:Q fields for e.g. SHADD.
1172    We tag one operand with the qualifier according to the code;
1173    whether the qualifier is valid for this opcode or not is left to
1174    the semantic checking.  */
1175 
1176 static int
1177 decode_sizeq (aarch64_inst *inst)
1178 {
1179   int idx;
1180   enum aarch64_opnd_qualifier qualifier;
1181   aarch64_insn code;
1182   aarch64_insn value, mask;
1183   enum aarch64_field_kind fld_sz;
1184   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1185 
1186   if (inst->opcode->iclass == asisdlse
1187      || inst->opcode->iclass == asisdlsep
1188      || inst->opcode->iclass == asisdlso
1189      || inst->opcode->iclass == asisdlsop)
1190     fld_sz = FLD_vldst_size;
1191   else
1192     fld_sz = FLD_size;
1193 
1194   code = inst->value;
1195   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1196   /* Work out which bits of the fields Q and size are actually
1197      available for operand encoding.  Opcodes like FMAXNM and FMLA have
1198      size[1] unavailable.  */
1199   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1200 
1201   /* The index of the operand to tag with a qualifier and the qualifier
1202      itself are deduced from the value of the size and Q fields and the
1203      lists of possible valid qualifiers.  */
1204   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1205   DEBUG_TRACE ("key idx: %d", idx);
1206 
1207   /* For most of the related instructions, size:Q is fully available for
1208      operand encoding.  */
1209   if (mask == 0x7)
1210     {
1211       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1212       return 1;
1213     }
1214 
1215   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1216 				   candidates);
1217 #ifdef DEBUG_AARCH64
1218   if (debug_dump)
1219     {
1220       int i;
1221       for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1222 	   && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1223 	DEBUG_TRACE ("qualifier %d: %s", i,
1224 		     aarch64_get_qualifier_name(candidates[i]));
1225       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1226     }
1227 #endif /* DEBUG_AARCH64 */
1228 
1229   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1230 
1231   if (qualifier == AARCH64_OPND_QLF_NIL)
1232     return 0;
1233 
1234   inst->operands[idx].qualifier = qualifier;
1235   return 1;
1236 }
1237 
1238 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1239      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1240 
1241 static int
1242 decode_asimd_fcvt (aarch64_inst *inst)
1243 {
1244   aarch64_field field = {0, 0};
1245   aarch64_insn value;
1246   enum aarch64_opnd_qualifier qualifier;
1247 
1248   gen_sub_field (FLD_size, 0, 1, &field);
1249   value = extract_field_2 (&field, inst->value, 0);
1250   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1251     : AARCH64_OPND_QLF_V_2D;
1252   switch (inst->opcode->op)
1253     {
1254     case OP_FCVTN:
1255     case OP_FCVTN2:
1256       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1257       inst->operands[1].qualifier = qualifier;
1258       break;
1259     case OP_FCVTL:
1260     case OP_FCVTL2:
1261       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
1262       inst->operands[0].qualifier = qualifier;
1263       break;
1264     default:
1265       assert (0);
1266       return 0;
1267     }
1268 
1269   return 1;
1270 }
1271 
1272 /* Decode size[0], i.e. bit 22, for
1273      e.g. FCVTXN <Vb><d>, <Va><n>.  */
1274 
1275 static int
1276 decode_asisd_fcvtxn (aarch64_inst *inst)
1277 {
1278   aarch64_field field = {0, 0};
1279   gen_sub_field (FLD_size, 0, 1, &field);
1280   if (!extract_field_2 (&field, inst->value, 0))
1281     return 0;
1282   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1283   return 1;
1284 }
1285 
1286 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
1287 static int
1288 decode_fcvt (aarch64_inst *inst)
1289 {
1290   enum aarch64_opnd_qualifier qualifier;
1291   aarch64_insn value;
1292   const aarch64_field field = {15, 2};
1293 
1294   /* opc dstsize */
1295   value = extract_field_2 (&field, inst->value, 0);
1296   switch (value)
1297     {
1298     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1299     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1300     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1301     default: return 0;
1302     }
1303   inst->operands[0].qualifier = qualifier;
1304 
1305   return 1;
1306 }
1307 
1308 /* Do miscellaneous decodings that are not common enough to be driven by
1309    flags.  */
1310 
1311 static int
1312 do_misc_decoding (aarch64_inst *inst)
1313 {
1314   switch (inst->opcode->op)
1315     {
1316     case OP_FCVT:
1317       return decode_fcvt (inst);
1318     case OP_FCVTN:
1319     case OP_FCVTN2:
1320     case OP_FCVTL:
1321     case OP_FCVTL2:
1322       return decode_asimd_fcvt (inst);
1323     case OP_FCVTXN_S:
1324       return decode_asisd_fcvtxn (inst);
1325     default:
1326       return 0;
1327     }
1328 }
1329 
1330 /* Opcodes that have fields shared by multiple operands are usually flagged
1331    with flags.  In this function, we detect such flags, decode the related
1332    field(s) and store the information in one of the related operands.  The
1333    'one' operand is not an arbitrary operand, but one of the operands that
1334    can accommodate all the information that has been decoded.  */
1335 
1336 static int
1337 do_special_decoding (aarch64_inst *inst)
1338 {
1339   int idx;
1340   aarch64_insn value;
1341   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
1342   if (inst->opcode->flags & F_COND)
1343     {
1344       value = extract_field (FLD_cond2, inst->value, 0);
1345       inst->cond = get_cond_from_value (value);
1346     }
1347   /* 'sf' field.  */
1348   if (inst->opcode->flags & F_SF)
1349     {
1350       idx = select_operand_for_sf_field_coding (inst->opcode);
1351       value = extract_field (FLD_sf, inst->value, 0);
1352       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1353       if ((inst->opcode->flags & F_N)
1354 	  && extract_field (FLD_N, inst->value, 0) != value)
1355 	return 0;
1356     }
1357   /* size:Q fields.  */
1358   if (inst->opcode->flags & F_SIZEQ)
1359     return decode_sizeq (inst);
1360 
1361   if (inst->opcode->flags & F_FPTYPE)
1362     {
1363       idx = select_operand_for_fptype_field_coding (inst->opcode);
1364       value = extract_field (FLD_type, inst->value, 0);
1365       switch (value)
1366 	{
1367 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1368 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1369 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1370 	default: return 0;
1371 	}
1372     }
1373 
1374   if (inst->opcode->flags & F_SSIZE)
1375     {
1376       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
1377 	 of the base opcode.  */
1378       aarch64_insn mask;
1379       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1380       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1381       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1382       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1383       /* For most of the related instructions, the 'size' field is fully
1384 	 available for operand encoding.  */
1385       if (mask == 0x3)
1386 	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1387       else
1388 	{
1389 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1390 					   candidates);
1391 	  inst->operands[idx].qualifier
1392 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
1393 	}
1394     }
1395 
1396   if (inst->opcode->flags & F_T)
1397     {
1398       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
1399       int num = 0;
1400       unsigned val, Q;
1401       assert (aarch64_get_operand_class (inst->opcode->operands[0])
1402 	      == AARCH64_OPND_CLASS_SIMD_REG);
1403       /* imm5<3:0>	q	<t>
1404 	 0000		x	reserved
1405 	 xxx1		0	8b
1406 	 xxx1		1	16b
1407 	 xx10		0	4h
1408 	 xx10		1	8h
1409 	 x100		0	2s
1410 	 x100		1	4s
1411 	 1000		0	reserved
1412 	 1000		1	2d  */
1413       val = extract_field (FLD_imm5, inst->value, 0);
1414       while ((val & 0x1) == 0 && ++num <= 3)
1415 	val >>= 1;
1416       if (num > 3)
1417 	return 0;
1418       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1419       inst->operands[0].qualifier =
1420 	get_vreg_qualifier_from_value ((num << 1) | Q);
1421     }
1422 
1423   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1424     {
1425       /* Use Rt to encode in the case of e.g.
1426 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
1427       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1428       if (idx == -1)
1429 	{
1430 	  /* Otherwise use the result operand, which has to be an integer
1431 	     register.  */
1432 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
1433 		  == AARCH64_OPND_CLASS_INT_REG);
1434 	  idx = 0;
1435 	}
1436       assert (idx == 0 || idx == 1);
1437       value = extract_field (FLD_Q, inst->value, 0);
1438       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1439     }
1440 
1441   if (inst->opcode->flags & F_LDS_SIZE)
1442     {
1443       aarch64_field field = {0, 0};
1444       assert (aarch64_get_operand_class (inst->opcode->operands[0])
1445 	      == AARCH64_OPND_CLASS_INT_REG);
1446       gen_sub_field (FLD_opc, 0, 1, &field);
1447       value = extract_field_2 (&field, inst->value, 0);
1448       inst->operands[0].qualifier
1449 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1450     }
1451 
1452   /* Miscellaneous decoding; done as the last step.  */
1453   if (inst->opcode->flags & F_MISC)
1454     return do_misc_decoding (inst);
1455 
1456   return 1;
1457 }
1458 
1459 /* Converters converting a real opcode instruction to its alias form.  */
1460 
1461 /* ROR <Wd>, <Ws>, #<shift>
1462      is equivalent to:
1463    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
1464 static int
1465 convert_extr_to_ror (aarch64_inst *inst)
1466 {
1467   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1468     {
1469       copy_operand_info (inst, 2, 3);
1470       inst->operands[3].type = AARCH64_OPND_NIL;
1471       return 1;
1472     }
1473   return 0;
1474 }
1475 
1476 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1477      is equivalent to:
1478    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
1479 static int
1480 convert_shll_to_xtl (aarch64_inst *inst)
1481 {
1482   if (inst->operands[2].imm.value == 0)
1483     {
1484       inst->operands[2].type = AARCH64_OPND_NIL;
1485       return 1;
1486     }
1487   return 0;
1488 }
1489 
1490 /* Convert
1491      UBFM <Xd>, <Xn>, #<shift>, #63.
1492    to
1493      LSR <Xd>, <Xn>, #<shift>.  */
1494 static int
1495 convert_bfm_to_sr (aarch64_inst *inst)
1496 {
1497   int64_t imms, val;
1498 
1499   imms = inst->operands[3].imm.value;
1500   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1501   if (imms == val)
1502     {
1503       inst->operands[3].type = AARCH64_OPND_NIL;
1504       return 1;
1505     }
1506 
1507   return 0;
1508 }
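
/* Worked example (illustrative note, not from the original source):
   UBFM <Xd>, <Xn>, #4, #63 has <imms> equal to the register width minus
   one, so operand 3 is dropped and the instruction is shown as
   LSR <Xd>, <Xn>, #4.  */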
1509 
1510 /* Convert MOV to ORR.  */
1511 static int
1512 convert_orr_to_mov (aarch64_inst *inst)
1513 {
1514   /* MOV <Vd>.<T>, <Vn>.<T>
1515      is equivalent to:
1516      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
1517   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1518     {
1519       inst->operands[2].type = AARCH64_OPND_NIL;
1520       return 1;
1521     }
1522   return 0;
1523 }
1524 
1525 /* When <imms> >= <immr>, the instruction written:
1526      SBFX <Xd>, <Xn>, #<lsb>, #<width>
1527    is equivalent to:
1528      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
1529 
1530 static int
1531 convert_bfm_to_bfx (aarch64_inst *inst)
1532 {
1533   int64_t immr, imms;
1534 
1535   immr = inst->operands[2].imm.value;
1536   imms = inst->operands[3].imm.value;
1537   if (imms >= immr)
1538     {
1539       int64_t lsb = immr;
1540       inst->operands[2].imm.value = lsb;
1541       inst->operands[3].imm.value = imms + 1 - lsb;
1542       /* The two opcodes have different qualifiers for
1543 	 the immediate operands; reset to help the checking.  */
1544       reset_operand_qualifier (inst, 2);
1545       reset_operand_qualifier (inst, 3);
1546       return 1;
1547     }
1548 
1549   return 0;
1550 }
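
/* Worked example (illustrative note, not from the original source):
   SBFM <Xd>, <Xn>, #5, #12 has <imms> >= <immr>, so it is rewritten as
   SBFX <Xd>, <Xn>, #5, #8 (lsb = 5, width = 12 + 1 - 5 = 8).  */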
1551 
1552 /* When <imms> < <immr>, the instruction written:
1553      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1554    is equivalent to:
1555      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
1556 
1557 static int
1558 convert_bfm_to_bfi (aarch64_inst *inst)
1559 {
1560   int64_t immr, imms, val;
1561 
1562   immr = inst->operands[2].imm.value;
1563   imms = inst->operands[3].imm.value;
1564   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1565   if (imms < immr)
1566     {
1567       inst->operands[2].imm.value = (val - immr) & (val - 1);
1568       inst->operands[3].imm.value = imms + 1;
1569       /* The two opcodes have different qualifiers for
1570 	 the immediate operands; reset to help the checking.  */
1571       reset_operand_qualifier (inst, 2);
1572       reset_operand_qualifier (inst, 3);
1573       return 1;
1574     }
1575 
1576   return 0;
1577 }
1578 
1579 /* The instruction written:
1580      LSL <Xd>, <Xn>, #<shift>
1581    is equivalent to:
1582      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
1583 
1584 static int
1585 convert_ubfm_to_lsl (aarch64_inst *inst)
1586 {
1587   int64_t immr = inst->operands[2].imm.value;
1588   int64_t imms = inst->operands[3].imm.value;
1589   int64_t val
1590     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1591 
1592   if ((immr == 0 && imms == val) || immr == imms + 1)
1593     {
1594       inst->operands[3].type = AARCH64_OPND_NIL;
1595       inst->operands[2].imm.value = val - imms;
1596       return 1;
1597     }
1598 
1599   return 0;
1600 }
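
/* Worked example (illustrative note, not from the original source):
   UBFM <Xd>, <Xn>, #61, #60 satisfies immr == imms + 1, so it is shown
   as LSL <Xd>, <Xn>, #3 (the shift being 63 - 60).  */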
1601 
1602 /* CINC <Wd>, <Wn>, <cond>
1603      is equivalent to:
1604    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
1605 
1606 static int
1607 convert_from_csel (aarch64_inst *inst)
1608 {
1609   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1610     {
1611       copy_operand_info (inst, 2, 3);
1612       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1613       inst->operands[3].type = AARCH64_OPND_NIL;
1614       return 1;
1615     }
1616   return 0;
1617 }
1618 
1619 /* CSET <Wd>, <cond>
1620      is equivalent to:
1621    CSINC <Wd>, WZR, WZR, invert(<cond>).  */
1622 
1623 static int
1624 convert_csinc_to_cset (aarch64_inst *inst)
1625 {
1626   if (inst->operands[1].reg.regno == 0x1f
1627       && inst->operands[2].reg.regno == 0x1f)
1628     {
1629       copy_operand_info (inst, 1, 3);
1630       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1631       inst->operands[3].type = AARCH64_OPND_NIL;
1632       inst->operands[2].type = AARCH64_OPND_NIL;
1633       return 1;
1634     }
1635   return 0;
1636 }
1637 
1638 /* MOV <Wd>, #<imm>
1639      is equivalent to:
1640    MOVZ <Wd>, #<imm16>, LSL #<shift>.
1641 
1642    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1643    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1644    or where a MOVN has an immediate that could be encoded by MOVZ, or where
1645    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1646    machine-instruction mnemonic must be used.  */
1647 
1648 static int
1649 convert_movewide_to_mov (aarch64_inst *inst)
1650 {
1651   uint64_t value = inst->operands[1].imm.value;
1652   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
1653   if (value == 0 && inst->operands[1].shifter.amount != 0)
1654     return 0;
1655   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1656   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1657   value <<= inst->operands[1].shifter.amount;
1658   /* As an alias converter, bear in mind that INST->OPCODE
1659      is the opcode of the real instruction.  */
1660   if (inst->opcode->op == OP_MOVN)
1661     {
1662       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1663       value = ~value;
1664       /* A MOVN has an immediate that could be encoded by MOVZ.  */
1665       if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1666 	return 0;
1667     }
1668   inst->operands[1].imm.value = value;
1669   inst->operands[1].shifter.amount = 0;
1670   return 1;
1671 }
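
/* Worked example (illustrative note, not from the original source):
   MOVZ <Xd>, #0x1234, LSL #16 is folded into the single immediate
   0x12340000 and displayed as MOV <Xd>, #0x12340000.  */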
1672 
1673 /* MOV <Wd>, #<imm>
1674      is equivalent to:
1675    ORR <Wd>, WZR, #<imm>.
1676 
1677    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1678    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1679    or where a MOVN has an immediate that could be encoded by MOVZ, or where
1680    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1681    machine-instruction mnemonic must be used.  */
1682 
1683 static int
1684 convert_movebitmask_to_mov (aarch64_inst *inst)
1685 {
1686   int is32;
1687   uint64_t value;
1688 
1689   /* Should have been assured by the base opcode value.  */
1690   assert (inst->operands[1].reg.regno == 0x1f);
1691   copy_operand_info (inst, 1, 2);
1692   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1693   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1694   value = inst->operands[1].imm.value;
1695   /* ORR has an immediate that could be generated by a MOVZ or MOVN
1696      instruction.  */
1697   if (inst->operands[0].reg.regno != 0x1f
1698       && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1699 	  || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1700     return 0;
1701 
1702   inst->operands[2].type = AARCH64_OPND_NIL;
1703   return 1;
1704 }
1705 
1706 /* Some alias opcodes are disassembled by being converted from their real form.
1707    N.B. INST->OPCODE is the real opcode rather than the alias.  */
1708 
1709 static int
1710 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1711 {
1712   switch (alias->op)
1713     {
1714     case OP_ASR_IMM:
1715     case OP_LSR_IMM:
1716       return convert_bfm_to_sr (inst);
1717     case OP_LSL_IMM:
1718       return convert_ubfm_to_lsl (inst);
1719     case OP_CINC:
1720     case OP_CINV:
1721     case OP_CNEG:
1722       return convert_from_csel (inst);
1723     case OP_CSET:
1724     case OP_CSETM:
1725       return convert_csinc_to_cset (inst);
1726     case OP_UBFX:
1727     case OP_BFXIL:
1728     case OP_SBFX:
1729       return convert_bfm_to_bfx (inst);
1730     case OP_SBFIZ:
1731     case OP_BFI:
1732     case OP_UBFIZ:
1733       return convert_bfm_to_bfi (inst);
1734     case OP_MOV_V:
1735       return convert_orr_to_mov (inst);
1736     case OP_MOV_IMM_WIDE:
1737     case OP_MOV_IMM_WIDEN:
1738       return convert_movewide_to_mov (inst);
1739     case OP_MOV_IMM_LOG:
1740       return convert_movebitmask_to_mov (inst);
1741     case OP_ROR_IMM:
1742       return convert_extr_to_ror (inst);
1743     case OP_SXTL:
1744     case OP_SXTL2:
1745     case OP_UXTL:
1746     case OP_UXTL2:
1747       return convert_shll_to_xtl (inst);
1748     default:
1749       return 0;
1750     }
1751 }
1752 
1753 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1754 				  aarch64_inst *, int);
1755 
1756 /* Given the instruction information in *INST, check if the instruction has
1757    any alias form that can be used to represent *INST.  If the answer is yes,
1758    update *INST to be in the form of the determined alias.  */
1759 
1760 /* In the opcode description table, the following flags are used in opcode
1761    entries to help establish the relations between the real and alias opcodes:
1762 
1763 	F_ALIAS:	opcode is an alias
1764 	F_HAS_ALIAS:	opcode has alias(es)
1765 	F_P1
1766 	F_P2
1767 	F_P3:		Disassembly preference priority 1-3 (the higher the
1768 			number, the higher the priority).  If nothing is
1769 			specified, the priority defaults to 0, i.e. the lowest.
1770 
1771    Although the relation between the machine and the alias instructions is not
1772    explicitly described, it can be easily determined from the base opcode
1773    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1774    description entries:
1775 
1776    The mask of an alias opcode must be equal to or a super-set (i.e. more
1777    constrained) of that of the aliased opcode; so is the base opcode value.
1778 
1779    if (opcode_has_alias (real) && alias_opcode_p (opcode)
1780        && (opcode->mask & real->mask) == real->mask
1781        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1782    then OPCODE is an alias of, and only of, the REAL instruction
1783 
1784    The alias relationship is forced to be flat-structured to keep the related
1785    algorithms simple; no entry can be flagged with both F_ALIAS and F_HAS_ALIAS.
1786 
1787    During disassembly, the decoding decision tree (in
1788    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1789    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1790    not specified), the disassembler will check whether any alias instruction
1791    exists for this real instruction.  If there is, the disassembler will try
1792    to disassemble the 32-bit binary again using the alias's rule, or try to
1793    convert the IR to the form of the alias.  In the case of multiple aliases,
1794    the aliases are tried one by one from the highest priority (currently the
1795    flag F_P3) to the lowest priority (no priority flag), and the first one
1796    that succeeds is adopted.
1797 
1798    You may ask why there is a need to convert the IR from one form to
1799    another when handling certain aliases.  On the one hand, it avoids
1800    adding more operand code to handle unusual encoding/decoding; on the
1801    other hand, during disassembly, the conversion is an effective way to
1802    check the condition of an alias (as an alias may be adopted only if
1803    certain conditions are met).
1804 
1805    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1806    aarch64_opcode_table and generated aarch64_find_alias_opcode and
1807    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
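/* The relation test described above corresponds to the following sketch, given
   for illustration only (the helper name is made up; the disassembler itself
   relies on the generated aarch64_find_alias_opcode and
   aarch64_find_next_alias_opcode instead):

     static bfd_boolean
     is_alias_of_p (const aarch64_opcode *alias, const aarch64_opcode *real)
     {
       return opcode_has_alias (real) && alias_opcode_p (alias)
	      && (alias->mask & real->mask) == real->mask
	      && (real->mask & alias->opcode) == (real->mask & real->opcode);
     }
*/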
1808 
1809 static void
1810 determine_disassembling_preference (struct aarch64_inst *inst)
1811 {
1812   const aarch64_opcode *opcode;
1813   const aarch64_opcode *alias;
1814 
1815   opcode = inst->opcode;
1816 
1817   /* This opcode does not have an alias, so use itself.  */
1818   if (opcode_has_alias (opcode) == FALSE)
1819     return;
1820 
1821   alias = aarch64_find_alias_opcode (opcode);
1822   assert (alias);
1823 
1824 #ifdef DEBUG_AARCH64
1825   if (debug_dump)
1826     {
1827       const aarch64_opcode *tmp = alias;
1828       printf ("####   LIST    orderd: ");
1829       while (tmp)
1830 	{
1831 	  printf ("%s, ", tmp->name);
1832 	  tmp = aarch64_find_next_alias_opcode (tmp);
1833 	}
1834       printf ("\n");
1835     }
1836 #endif /* DEBUG_AARCH64 */
1837 
1838   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1839     {
1840       DEBUG_TRACE ("try %s", alias->name);
1841       assert (alias_opcode_p (alias));
1842 
1843       /* An alias can be a pseudo opcode which will never be used in the
1844 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1845 	 aliasing AND.  */
1846       if (pseudo_opcode_p (alias))
1847 	{
1848 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
1849 	  continue;
1850 	}
1851 
1852       if ((inst->value & alias->mask) != alias->opcode)
1853 	{
1854 	  DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
1855 	  continue;
1856 	}
1857       /* No need to do any complicated transformation on the operands if the
1858 	 alias opcode does not have any operands.  */
1859       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1860 	{
1861 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1862 	  aarch64_replace_opcode (inst, alias);
1863 	  return;
1864 	}
1865       if (alias->flags & F_CONV)
1866 	{
1867 	  aarch64_inst copy;
1868 	  memcpy (&copy, inst, sizeof (aarch64_inst));
1869 	  /* ALIAS is the preference as long as the instruction can be
1870 	     successfully converted to the form of ALIAS.  */
1871 	  if (convert_to_alias (&copy, alias) == 1)
1872 	    {
1873 	      aarch64_replace_opcode (&copy, alias);
1874 	      assert (aarch64_match_operands_constraint (&copy, NULL));
1875 	      DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1876 	      memcpy (inst, &copy, sizeof (aarch64_inst));
1877 	      return;
1878 	    }
1879 	}
1880       else
1881 	{
1882 	  /* Directly decode the alias opcode.  */
1883 	  aarch64_inst temp;
1884 	  memset (&temp, '\0', sizeof (aarch64_inst));
1885 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1886 	    {
1887 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1888 	      memcpy (inst, &temp, sizeof (aarch64_inst));
1889 	      return;
1890 	    }
1891 	}
1892     }
1893 }
1894 
1895 /* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
1896    fails, which means that CODE is not an instruction of OPCODE; otherwise
1897    return 1.
1898 
1899    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1900    determined and used to disassemble CODE; this is done just before the
1901    return.  */
1902 
1903 static int
1904 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1905 		       aarch64_inst *inst, int noaliases_p)
1906 {
1907   int i;
1908 
1909   DEBUG_TRACE ("enter with %s", opcode->name);
1910 
1911   assert (opcode && inst);
1912 
1913   /* Check the base opcode.  */
1914   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1915     {
1916       DEBUG_TRACE ("base opcode match FAIL");
1917       goto decode_fail;
1918     }
1919 
1920   /* Clear inst.  */
1921   memset (inst, '\0', sizeof (aarch64_inst));
1922 
1923   inst->opcode = opcode;
1924   inst->value = code;
1925 
1926   /* Assign operand codes and indexes.  */
1927   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1928     {
1929       if (opcode->operands[i] == AARCH64_OPND_NIL)
1930 	break;
1931       inst->operands[i].type = opcode->operands[i];
1932       inst->operands[i].idx = i;
1933     }
1934 
1935   /* Call the opcode decoder indicated by flags.  */
1936   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1937     {
1938       DEBUG_TRACE ("opcode flag-based decoder FAIL");
1939       goto decode_fail;
1940     }
1941 
1942   /* Call operand decoders.  */
1943   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1944     {
1945       const aarch64_operand *opnd;
1946       enum aarch64_opnd type;
1947       type = opcode->operands[i];
1948       if (type == AARCH64_OPND_NIL)
1949 	break;
1950       opnd = &aarch64_operands[type];
1951       if (operand_has_extractor (opnd)
1952 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1953 	{
1954 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1955 	  goto decode_fail;
1956 	}
1957     }
1958 
1959   /* Match the qualifiers.  */
1960   if (aarch64_match_operands_constraint (inst, NULL) == 1)
1961     {
1962       /* Arriving here, the CODE has been determined as a valid instruction
1963 	 of OPCODE and *INST has been filled with information of this OPCODE
1964 	 instruction.  Before the return, check if the instruction has any
1965 	 alias and should be disassembled in the form of its alias instead.
1966 	 If the answer is yes, *INST will be updated.  */
1967       if (!noaliases_p)
1968 	determine_disassembling_preference (inst);
1969       DEBUG_TRACE ("SUCCESS");
1970       return 1;
1971     }
1972   else
1973     {
1974       DEBUG_TRACE ("constraint matching FAIL");
1975     }
1976 
1977 decode_fail:
1978   return 0;
1979 }
1980 
1981 /* This does some user-friendly fix-up to *INST.  It currently focuses on
1982    adjusting the qualifiers to help the printed instruction be
1983    recognized/understood more easily.  */
1984 
1985 static void
1986 user_friendly_fixup (aarch64_inst *inst)
1987 {
1988   switch (inst->opcode->iclass)
1989     {
1990     case testbranch:
1991       /* TBNZ Xn|Wn, #uimm6, label
1992 	 Test and Branch Not Zero: conditionally jumps to label if bit number
1993 	 uimm6 in register Xn is not zero.  The bit number implies the width of
1994 	 the register, which may be written and should be disassembled as Wn if
1995 	 uimm6 is less than 32.  Limited to a branch offset range of +/- 32KiB.
1996 	 */
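      /* For instance, an encoding that tests bit 3 of register 0 is printed as
	 "tbnz w0, #3, <addr>" rather than "tbnz x0, #3, <addr>".  */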
1997       if (inst->operands[1].imm.value < 32)
1998 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
1999       break;
2000     default: break;
2001     }
2002 }
2003 
2004 /* Decode INSN and fill in *INST the instruction information.  */
2005 
2006 static int
2007 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2008 		    aarch64_inst *inst)
2009 {
2010   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2011 
2012 #ifdef DEBUG_AARCH64
2013   if (debug_dump)
2014     {
2015       const aarch64_opcode *tmp = opcode;
2016       printf ("\n");
2017       DEBUG_TRACE ("opcode lookup:");
2018       while (tmp != NULL)
2019 	{
2020 	  aarch64_verbose ("  %s", tmp->name);
2021 	  tmp = aarch64_find_next_opcode (tmp);
2022 	}
2023     }
2024 #endif /* DEBUG_AARCH64 */
2025 
2026   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2027      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2028      opcode field and value; the difference is that one of them has an extra
2029      field as part of the opcode, while the same field is used for operand
2030      encoding in the other opcode(s) ('immh' in the case of the example).  */
2031   while (opcode != NULL)
2032     {
2033       /* But only one opcode can be decoded successfully, as the
2034 	 decoding routine checks the constraints carefully.  */
2035       if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2036 	return ERR_OK;
2037       opcode = aarch64_find_next_opcode (opcode);
2038     }
2039 
2040   return ERR_UND;
2041 }
2042 
2043 /* Print operands.  */
2044 
2045 static void
2046 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2047 		const aarch64_opnd_info *opnds, struct disassemble_info *info)
2048 {
2049   int i, pcrel_p, num_printed;
2050   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2051     {
2052       const size_t size = 128;
2053       char str[size];
2054       /* We rely mainly on the operand info in the opcode table; however, we
2055 	 also look into inst->operands to support the disassembling of the
2056 	 optional operand.
2057 	 The two operand codes should be the same in all cases, apart from
2058 	 when the operand can be optional.  */
2059       if (opcode->operands[i] == AARCH64_OPND_NIL
2060 	  || opnds[i].type == AARCH64_OPND_NIL)
2061 	break;
2062 
2063       /* Generate the operand string in STR.  */
2064       aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2065 			     &info->target);
2066 
2067       /* Print the delimiter (taking account of omitted operand(s)).  */
2068       if (str[0] != '\0')
2069 	(*info->fprintf_func) (info->stream, "%s",
2070 			       num_printed++ == 0 ? "\t" : ", ");
2071 
2072       /* Print the operand.  */
2073       if (pcrel_p)
2074 	(*info->print_address_func) (info->target, info);
2075       else
2076 	(*info->fprintf_func) (info->stream, "%s", str);
2077     }
2078 }
2079 
2080 /* Print the instruction mnemonic name.  */
2081 
2082 static void
2083 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2084 {
2085   if (inst->opcode->flags & F_COND)
2086     {
2087       /* For instructions that are truly conditionally executed, e.g. b.cond,
2088 	 prepare the full mnemonic name with the corresponding condition
2089 	 suffix.  */
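      /* For example, the entry for B.cond combined with a decoded condition of
	 EQ is printed as "b.eq": the name up to the '.' plus the first name of
	 the condition.  */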
2090       char name[8], *ptr;
2091       size_t len;
2092 
2093       ptr = strchr (inst->opcode->name, '.');
2094       assert (ptr && inst->cond);
2095       len = ptr - inst->opcode->name;
2096       assert (len < 8);
2097       strncpy (name, inst->opcode->name, len);
2098       name [len] = '\0';
2099       (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2100     }
2101   else
2102     (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2103 }
2104 
2105 /* Print the instruction according to *INST.  */
2106 
2107 static void
2108 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2109 		    struct disassemble_info *info)
2110 {
2111   print_mnemonic_name (inst, info);
2112   print_operands (pc, inst->opcode, inst->operands, info);
2113 }
2114 
2115 /* Entry-point of the instruction disassembler and printer.  */
2116 
2117 static void
2118 print_insn_aarch64_word (bfd_vma pc,
2119 			 uint32_t word,
2120 			 struct disassemble_info *info)
2121 {
2122   static const char *err_msg[6] =
2123     {
2124       [ERR_OK]   = "_",
2125       [-ERR_UND] = "undefined",
2126       [-ERR_UNP] = "unpredictable",
2127       [-ERR_NYI] = "NYI"
2128     };
2129 
2130   int ret;
2131   aarch64_inst inst;
2132 
2133   info->insn_info_valid = 1;
2134   info->branch_delay_insns = 0;
2135   info->data_size = 0;
2136   info->target = 0;
2137   info->target2 = 0;
2138 
2139   if (info->flags & INSN_HAS_RELOC)
2140     /* If the instruction has a reloc associated with it, then
2141        the offset field in the instruction will actually be the
2142        addend for the reloc.  (If we are using REL type relocs).
2143        In such cases, we can ignore the pc when computing
2144        addresses, since the addend is not currently pc-relative.  */
2145     pc = 0;
2146 
2147   ret = disas_aarch64_insn (pc, word, &inst);
2148 
2149   if (((word >> 21) & 0x3ff) == 1)
2150     {
2151       /* RESERVED for ALES. */
2152       assert (ret != ERR_OK);
2153       ret = ERR_NYI;
2154     }
2155 
2156   switch (ret)
2157     {
2158     case ERR_UND:
2159     case ERR_UNP:
2160     case ERR_NYI:
2161       /* Handle undefined instructions.  */
2162       info->insn_type = dis_noninsn;
2163       (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2164 			     word, err_msg[-ret]);
2165       break;
2166     case ERR_OK:
2167       user_friendly_fixup (&inst);
2168       print_aarch64_insn (pc, &inst, info);
2169       break;
2170     default:
2171       abort ();
2172     }
2173 }
2174 
2175 /* Prevent mapping symbols ($x, $d, etc.) from
2176    being displayed in symbol-relative addresses.  */
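/* For example, "$x", "$d" and "$d.whatever" are rejected here, while an
   ordinary symbol such as "$foo" (or "$xyz") is still accepted.  */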
2177 
2178 bfd_boolean
2179 aarch64_symbol_is_valid (asymbol * sym,
2180 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
2181 {
2182   const char * name;
2183 
2184   if (sym == NULL)
2185     return FALSE;
2186 
2187   name = bfd_asymbol_name (sym);
2188 
2189   return name
2190     && (name[0] != '$'
2191 	|| (name[1] != 'x' && name[1] != 'd')
2192 	|| (name[2] != '\0' && name[2] != '.'));
2193 }
2194 
2195 /* Print data bytes on INFO->STREAM.  */
2196 
2197 static void
2198 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2199 		 uint32_t word,
2200 		 struct disassemble_info *info)
2201 {
2202   switch (info->bytes_per_chunk)
2203     {
2204     case 1:
2205       info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2206       break;
2207     case 2:
2208       info->fprintf_func (info->stream, ".short\t0x%04x", word);
2209       break;
2210     case 4:
2211       info->fprintf_func (info->stream, ".word\t0x%08x", word);
2212       break;
2213     default:
2214       abort ();
2215     }
2216 }
2217 
2218 /* Try to infer the code or data type from a symbol.
2219    Returns nonzero if *MAP_TYPE was set.  */
2220 
2221 static int
2222 get_sym_code_type (struct disassemble_info *info, int n,
2223 		   enum map_type *map_type)
2224 {
2225   elf_symbol_type *es;
2226   unsigned int type;
2227   const char *name;
2228 
2229   es = *(elf_symbol_type **)(info->symtab + n);
2230   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2231 
2232   /* If the symbol has function type then use that.  */
2233   if (type == STT_FUNC)
2234     {
2235       *map_type = MAP_INSN;
2236       return TRUE;
2237     }
2238 
2239   /* Check for mapping symbols.  */
2240   name = bfd_asymbol_name(info->symtab[n]);
2241   if (name[0] == '$'
2242       && (name[1] == 'x' || name[1] == 'd')
2243       && (name[2] == '\0' || name[2] == '.'))
2244     {
2245       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2246       return TRUE;
2247     }
2248 
2249   return FALSE;
2250 }
2251 
2252 /* Entry-point of the AArch64 disassembler.  */
2253 
2254 int
2255 print_insn_aarch64 (bfd_vma pc,
2256 		    struct disassemble_info *info)
2257 {
2258   bfd_byte	buffer[INSNLEN];
2259   int		status;
2260   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2261   bfd_boolean   found = FALSE;
2262   unsigned int	size = 4;
2263   unsigned long	data;
2264 
2265   if (info->disassembler_options)
2266     {
2267       set_default_aarch64_dis_options (info);
2268 
2269       parse_aarch64_dis_options (info->disassembler_options);
2270 
2271       /* To avoid repeated parsing of these options, we remove them here.  */
2272       info->disassembler_options = NULL;
2273     }
2274 
2275   /* AArch64 instructions are always little-endian.  */
2276   info->endian_code = BFD_ENDIAN_LITTLE;
2277 
2278   /* First check the full symtab for a mapping symbol, even if there
2279      are no usable non-mapping symbols for this address.  */
2280   if (info->symtab_size != 0
2281       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2282     {
2283       enum map_type type = MAP_INSN;
2284       int last_sym = -1;
2285       bfd_vma addr;
2286       int n;
2287 
2288       if (pc <= last_mapping_addr)
2289 	last_mapping_sym = -1;
2290 
2291       /* Start scanning at the start of the function, or wherever
2292 	 we finished last time.  */
2293       n = info->symtab_pos + 1;
2294       if (n < last_mapping_sym)
2295 	n = last_mapping_sym;
2296 
2297       /* Scan up to the location being disassembled.  */
2298       for (; n < info->symtab_size; n++)
2299 	{
2300 	  addr = bfd_asymbol_value (info->symtab[n]);
2301 	  if (addr > pc)
2302 	    break;
2303 	  if ((info->section == NULL
2304 	       || info->section == info->symtab[n]->section)
2305 	      && get_sym_code_type (info, n, &type))
2306 	    {
2307 	      last_sym = n;
2308 	      found = TRUE;
2309 	    }
2310 	}
2311 
2312       if (!found)
2313 	{
2314 	  n = info->symtab_pos;
2315 	  if (n < last_mapping_sym)
2316 	    n = last_mapping_sym;
2317 
2318 	  /* No mapping symbol found at this address.  Look backwards
2319 	     for a preceding one.  */
2320 	  for (; n >= 0; n--)
2321 	    {
2322 	      if (get_sym_code_type (info, n, &type))
2323 		{
2324 		  last_sym = n;
2325 		  found = TRUE;
2326 		  break;
2327 		}
2328 	    }
2329 	}
2330 
2331       last_mapping_sym = last_sym;
2332       last_type = type;
2333 
2334       /* Look a little bit ahead to see if we should print out
2335 	 less than four bytes of data.  If there's a symbol,
2336 	 mapping or otherwise, after two bytes then don't
2337 	 print more.  */
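      /* A worked example: disassembling data at pc 0x1001 with the next symbol
	 at 0x1004 gives size = 4 - (0x1001 & 3) = 3, which the size == 3 case
	 below turns into a single .byte; the following call at 0x1002 then
	 prints a .short.  */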
2338       if (last_type == MAP_DATA)
2339 	{
2340 	  size = 4 - (pc & 3);
2341 	  for (n = last_sym + 1; n < info->symtab_size; n++)
2342 	    {
2343 	      addr = bfd_asymbol_value (info->symtab[n]);
2344 	      if (addr > pc)
2345 		{
2346 		  if (addr - pc < size)
2347 		    size = addr - pc;
2348 		  break;
2349 		}
2350 	    }
2351 	  /* If the next symbol is after three bytes, we need to
2352 	     print only part of the data, so that we can use either
2353 	     .byte or .short.  */
2354 	  if (size == 3)
2355 	    size = (pc & 1) ? 1 : 2;
2356 	}
2357     }
2358 
2359   if (last_type == MAP_DATA)
2360     {
2361       /* size was set above.  */
2362       info->bytes_per_chunk = size;
2363       info->display_endian = info->endian;
2364       printer = print_insn_data;
2365     }
2366   else
2367     {
2368       info->bytes_per_chunk = size = INSNLEN;
2369       info->display_endian = info->endian_code;
2370       printer = print_insn_aarch64_word;
2371     }
2372 
2373   status = (*info->read_memory_func) (pc, buffer, size, info);
2374   if (status != 0)
2375     {
2376       (*info->memory_error_func) (status, pc, info);
2377       return -1;
2378     }
2379 
2380   data = bfd_get_bits (buffer, size * 8,
2381 		       info->display_endian == BFD_ENDIAN_BIG);
2382 
2383   (*printer) (pc, data, info);
2384 
2385   return size;
2386 }
2387 
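/* Print the supported values for the -M disassembler option on STREAM.  This
   is typically reached via objdump's --help output; "objdump -d -M no-aliases"
   then disassembles using the machine-instruction mnemonics instead of the
   aliases.  */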
2388 void
2389 print_aarch64_disassembler_options (FILE *stream)
2390 {
2391   fprintf (stream, _("\n\
2392 The following AARCH64 specific disassembler options are supported for use\n\
2393 with the -M switch (multiple options should be separated by commas):\n"));
2394 
2395   fprintf (stream, _("\n\
2396   no-aliases         Don't print instruction aliases.\n"));
2397 
2398   fprintf (stream, _("\n\
2399   aliases            Do print instruction aliases.\n"));
2400 
2401 #ifdef DEBUG_AARCH64
2402   fprintf (stream, _("\n\
2403   debug_dump         Temp switch for debug trace.\n"));
2404 #endif /* DEBUG_AARCH64 */
2405 
2406   fprintf (stream, _("\n"));
2407 }
2408