1 /* aarch64-dis.c -- AArch64 disassembler.
2    Copyright (C) 2009-2024 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
30 
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
33 
34 #define INSNLEN 4
35 
36 /* This character is used to encode style information within the output
37    buffers.  See get_style_text and print_operands for more details.  */
38 #define STYLE_MARKER_CHAR '\002'
39 
40 /* Cached mapping symbol state.  */
41 enum map_type
42 {
43   MAP_INSN,
44   MAP_DATA
45 };
46 
47 static aarch64_feature_set arch_variant; /* See select_aarch64_variant.  */
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_stop_offset = 0;
51 static bfd_vma last_mapping_addr = 0;
52 
53 /* Other options */
54 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
55 static int no_notes = 1;	/* If set, do not print disassembly notes in
56 				  the output as comments.  */
57 
58 /* Currently active instruction sequence.  */
59 static aarch64_instr_sequence insn_sequence;
60 
61 static void
62 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
63 {
64 }
65 
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
68 {
69   /* Try to match options that are simple flags */
70   if (startswith (option, "no-aliases"))
71     {
72       no_aliases = 1;
73       return;
74     }
75 
76   if (startswith (option, "aliases"))
77     {
78       no_aliases = 0;
79       return;
80     }
81 
82   if (startswith (option, "no-notes"))
83     {
84       no_notes = 1;
85       return;
86     }
87 
88   if (startswith (option, "notes"))
89     {
90       no_notes = 0;
91       return;
92     }
93 
94 #ifdef DEBUG_AARCH64
95   if (startswith (option, "debug_dump"))
96     {
97       debug_dump = 1;
98       return;
99     }
100 #endif /* DEBUG_AARCH64 */
101 
102   /* Invalid option.  */
103   opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
104 }
105 
106 static void
107 parse_aarch64_dis_options (const char *options)
108 {
109   const char *option_end;
110 
111   if (options == NULL)
112     return;
113 
114   while (*options != '\0')
115     {
116       /* Skip empty options.  */
117       if (*options == ',')
118 	{
119 	  options++;
120 	  continue;
121 	}
122 
123       /* We know that *options is neither NUL nor a comma.  */
124       option_end = options + 1;
125       while (*option_end != ',' && *option_end != '\0')
126 	option_end++;
127 
128       parse_aarch64_dis_option (options, option_end - options);
129 
130       /* Go on to the next one.  If option_end points to a comma, it
131 	 will be skipped above.  */
132       options = option_end;
133     }
134 }
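/* Illustrative note (not from the upstream source): these options normally
   arrive via the disassembler option string, e.g. GNU objdump's -M switch
   hands the comma-separated list straight to this parser:

     objdump -d -M no-aliases,notes prog.o

   which selects the most general mnemonics and enables disassembly notes.  */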
135 
136 /* Functions doing the instruction disassembling.  */
137 
138 /* The unnamed arguments consist of the number of fields and information about
139    these fields where the VALUE will be extracted from CODE and returned.
140    MASK can be zero or the base mask of the opcode.
141 
142    N.B. the fields are required to be in such an order that the most significant
143    field for VALUE comes first, e.g. the <index> in
144     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145    is encoded in H:L:M in some cases; in that case the fields should be passed
146    in the order H, L, M.  */
147 
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
150 {
151   uint32_t num;
152   const aarch64_field *field;
153   enum aarch64_field_kind kind;
154   va_list va;
155 
156   va_start (va, mask);
157   num = va_arg (va, uint32_t);
158   assert (num <= 5);
159   aarch64_insn value = 0x0;
160   while (num--)
161     {
162       kind = va_arg (va, enum aarch64_field_kind);
163       field = &fields[kind];
164       value <<= field->width;
165       value |= extract_field (kind, code, mask);
166     }
167   va_end (va);
168   return value;
169 }
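/* Example (illustrative, mirroring the usage later in this file): an <index>
   encoded in H:L:M is extracted most significant field first,

     aarch64_insn index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   which yields (H << 2) | (L << 1) | M.  */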
170 
171 /* Extract the value of all fields in SELF->fields after START from
172    instruction CODE.  The least significant bit comes from the final field.  */
173 
174 static aarch64_insn
175 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
176 			  aarch64_insn code)
177 {
178   aarch64_insn value;
179   unsigned int i;
180   enum aarch64_field_kind kind;
181 
182   value = 0;
183   for (i = start;
184        i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
185     {
186       kind = self->fields[i];
187       value <<= fields[kind].width;
188       value |= extract_field (kind, code, 0);
189     }
190   return value;
191 }
192 
193 /* Extract the value of all fields in SELF->fields from instruction CODE.
194    The least significant bit comes from the final field.  */
195 
196 static aarch64_insn
197 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
198 {
199   return extract_all_fields_after (self, 0, code);
200 }
201 
202 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit.  */
203 static inline uint64_t
204 sign_extend (aarch64_insn value, unsigned i)
205 {
206   uint64_t ret, sign;
207 
208   assert (i < 32);
209   ret = value;
210   sign = (uint64_t) 1 << i;
211   return ((ret & (sign + sign - 1)) ^ sign) - sign;
212 }
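/* Worked example (illustrative): with I = 8, the 9-bit value 0x1ff becomes
   (uint64_t) -1 and 0x0ff stays 0xff, since bit 8 is treated as the sign
   bit.  */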
213 
214 /* N.B. the following inline helper functions create a dependency on the
215    order of operand qualifier enumerators.  */
216 
217 /* Given VALUE, return qualifier for a general purpose register.  */
218 static inline enum aarch64_opnd_qualifier
219 get_greg_qualifier_from_value (aarch64_insn value)
220 {
221   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
222   assert (value <= 0x1
223 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
224   return qualifier;
225 }
226 
227 /* Given VALUE, return qualifier for a vector register.  This does not support
228    decoding instructions that accept the 2H vector type.  */
229 
230 static inline enum aarch64_opnd_qualifier
231 get_vreg_qualifier_from_value (aarch64_insn value)
232 {
233   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
234 
235   /* Instructions using vector type 2H should not call this function.  Skip over
236      the 2H qualifier.  */
237   if (qualifier >= AARCH64_OPND_QLF_V_2H)
238     qualifier += 1;
239 
240   assert (value <= 0x8
241 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
242   return qualifier;
243 }
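/* Illustrative mapping (consistent with the immh/Q table further below):
   values 0..5 select 8B, 16B, 4H, 8H, 2S and 4S respectively (2H is skipped)
   and value 7 selects 2D.  */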
244 
245 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
246 static inline enum aarch64_opnd_qualifier
247 get_sreg_qualifier_from_value (aarch64_insn value)
248 {
249   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
250 
251   assert (value <= 0x4
252 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
253   return qualifier;
254 }
255 
256 /* The instruction in *INST is probably halfway through decoding and the
257    caller wants to know the expected qualifier for operand I.  Return that
258    qualifier if it can be established; otherwise return
259    AARCH64_OPND_QLF_NIL.  */
260 
261 static aarch64_opnd_qualifier_t
262 get_expected_qualifier (const aarch64_inst *inst, int i)
263 {
264   aarch64_opnd_qualifier_seq_t qualifiers;
265   /* Should not be called if the qualifier is known.  */
266   assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
267   int invalid_count;
268   if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
269 			       i, qualifiers, &invalid_count))
270     return qualifiers[i];
271   else
272     return AARCH64_OPND_QLF_NIL;
273 }
274 
275 /* Operand extractors.  */
276 
277 bool
278 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
279 		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
280 		  const aarch64_insn code ATTRIBUTE_UNUSED,
281 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
282 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
283 {
284   return true;
285 }
286 
287 bool
288 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
289 		   const aarch64_insn code,
290 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
291 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
292 {
293   info->reg.regno = (extract_field (self->fields[0], code, 0)
294 		     + get_operand_specific_data (self));
295   return true;
296 }
297 
298 bool
299 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
300 		   const aarch64_insn code ATTRIBUTE_UNUSED,
301 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
303 {
304   assert (info->idx == 1
305 	  || info->idx == 2
306 	  || info->idx == 3
307 	  || info->idx == 5);
308 
309   unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
310   info->reg.regno = (prev_regno == 0x1f) ? 0x1f
311 					 : prev_regno + 1;
312   return true;
313 }
314 
315 /* e.g. IC <ic_op>{, <Xt>}.  */
316 bool
317 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
318 			  const aarch64_insn code,
319 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
320 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
321 {
322   info->reg.regno = extract_field (self->fields[0], code, 0);
323   assert (info->idx == 1
324 	  && (aarch64_get_operand_class (inst->operands[0].type)
325 	      == AARCH64_OPND_CLASS_SYSTEM));
326   /* This will make the constraint checking happy and more importantly will
327      help the disassembler determine whether this operand is optional or
328      not.  */
329   info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
330 
331   return true;
332 }
333 
334 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
335 bool
336 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
337 		     const aarch64_insn code,
338 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
339 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
340 {
341   /* regno */
342   info->reglane.regno = extract_field (self->fields[0], code,
343 				       inst->opcode->mask);
344 
345   /* Index and/or type.  */
346   if (inst->opcode->iclass == asisdone
347     || inst->opcode->iclass == asimdins)
348     {
349       if (info->type == AARCH64_OPND_En
350 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
351 	{
352 	  unsigned shift;
353 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
354 	  assert (info->idx == 1);	/* Vn */
355 	  aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
356 	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
357 	  info->qualifier = get_expected_qualifier (inst, info->idx);
358 	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
359 	  info->reglane.index = value >> shift;
360 	}
361       else
362 	{
363 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
364 	     imm5<3:0>	<V>
365 	     0000	RESERVED
366 	     xxx1	B
367 	     xx10	H
368 	     x100	S
369 	     1000	D  */
370 	  int pos = -1;
371 	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
372 	  while (++pos <= 3 && (value & 0x1) == 0)
373 	    value >>= 1;
374 	  if (pos > 3)
375 	    return false;
376 	  info->qualifier = get_sreg_qualifier_from_value (pos);
377 	  info->reglane.index = (unsigned) (value >> 1);
378 	}
379     }
380   else if (inst->opcode->iclass == dotproduct)
381     {
382       /* Need information in other operand(s) to help decoding.  */
383       info->qualifier = get_expected_qualifier (inst, info->idx);
384       switch (info->qualifier)
385 	{
386 	case AARCH64_OPND_QLF_S_4B:
387 	case AARCH64_OPND_QLF_S_2H:
388 	  /* L:H */
389 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
390 	  info->reglane.regno &= 0x1f;
391 	  break;
392 	default:
393 	  return false;
394 	}
395     }
396   else if (inst->opcode->iclass == cryptosm3)
397     {
398       /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
399       info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
400     }
401   else
402     {
403       /* Index only for e.g.
404          SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
405 
406       /* Need information in other operand(s) to help decoding.  */
407       info->qualifier = get_expected_qualifier (inst, info->idx);
408       switch (info->qualifier)
409 	{
410 	case AARCH64_OPND_QLF_S_H:
411 	  if (info->type == AARCH64_OPND_Em16)
412 	    {
413 	      /* h:l:m */
414 	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
415 						    FLD_M);
416 	      info->reglane.regno &= 0xf;
417 	    }
418 	  else
419 	    {
420 	      /* h:l */
421 	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
422 	    }
423 	  break;
424 	case AARCH64_OPND_QLF_S_S:
425 	  /* h:l */
426 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
427 	  break;
428 	case AARCH64_OPND_QLF_S_D:
429 	  /* H */
430 	  info->reglane.index = extract_field (FLD_H, code, 0);
431 	  break;
432 	default:
433 	  return false;
434 	}
435 
436       if (inst->opcode->op == OP_FCMLA_ELEM
437 	  && info->qualifier != AARCH64_OPND_QLF_S_H)
438 	{
439 	  /* Complex operand takes two elements.  */
440 	  if (info->reglane.index & 1)
441 	    return false;
442 	  info->reglane.index /= 2;
443 	}
444     }
445 
446   return true;
447 }
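/* Example (illustrative): for an Em16 operand with the S_H qualifier, as in
   the half-precision by-element forms, the index is taken from H:L:M and the
   register number is masked to the range 0..15, per the code above.  */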
448 
449 bool
450 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
451 		     const aarch64_insn code,
452 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
453 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
454 {
455   /* R */
456   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
457   /* len */
458   info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
459   info->reglist.stride = 1;
460   return true;
461 }
462 
463 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
464 bool
465 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
466 			  aarch64_opnd_info *info, const aarch64_insn code,
467 			  const aarch64_inst *inst,
468 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
469 {
470   aarch64_insn value;
471   /* Number of elements in each structure to be loaded/stored.  */
472   unsigned expected_num = get_opcode_dependent_value (inst->opcode);
473 
474   struct
475     {
476       unsigned is_reserved;
477       unsigned num_regs;
478       unsigned num_elements;
479     } data [] =
480   {   {0, 4, 4},
481       {1, 4, 4},
482       {0, 4, 1},
483       {0, 4, 2},
484       {0, 3, 3},
485       {1, 3, 3},
486       {0, 3, 1},
487       {0, 1, 1},
488       {0, 2, 2},
489       {1, 2, 2},
490       {0, 2, 1},
491   };
492 
493   /* Rt */
494   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
495   /* opcode */
496   value = extract_field (FLD_opcode, code, 0);
497   /* PR 21595: Check for a bogus value.  */
498   if (value >= ARRAY_SIZE (data))
499     return false;
500   if (expected_num != data[value].num_elements || data[value].is_reserved)
501     return false;
502   info->reglist.num_regs = data[value].num_regs;
503   info->reglist.stride = 1;
504 
505   return true;
506 }
507 
508 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
509    lanes instructions.  */
510 bool
511 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
512 			    aarch64_opnd_info *info, const aarch64_insn code,
513 			    const aarch64_inst *inst,
514 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
515 {
516   aarch64_insn value;
517 
518   /* Rt */
519   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
520   /* S */
521   value = extract_field (FLD_S, code, 0);
522 
523   /* Number of registers is equal to the number of elements in
524      each structure to be loaded/stored.  */
525   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
526   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
527 
528   /* Except when it is LD1R.  */
529   if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
530     info->reglist.num_regs = 2;
531 
532   info->reglist.stride = 1;
533   return true;
534 }
535 
536 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
537    load/store single element instructions.  */
538 bool
539 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
540 			   aarch64_opnd_info *info, const aarch64_insn code,
541 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
542 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
543 {
544   aarch64_field field = {0, 0};
545   aarch64_insn QSsize;		/* fields Q:S:size.  */
546   aarch64_insn opcodeh2;	/* opcode<2:1> */
547 
548   /* Rt */
549   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
550 
551   /* Decode the index, opcode<2:1> and size.  */
552   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
553   opcodeh2 = extract_field_2 (&field, code, 0);
554   QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
555   switch (opcodeh2)
556     {
557     case 0x0:
558       info->qualifier = AARCH64_OPND_QLF_S_B;
559       /* Index encoded in "Q:S:size".  */
560       info->reglist.index = QSsize;
561       break;
562     case 0x1:
563       if (QSsize & 0x1)
564 	/* UND.  */
565 	return false;
566       info->qualifier = AARCH64_OPND_QLF_S_H;
567       /* Index encoded in "Q:S:size<1>".  */
568       info->reglist.index = QSsize >> 1;
569       break;
570     case 0x2:
571       if ((QSsize >> 1) & 0x1)
572 	/* UND.  */
573 	return false;
574       if ((QSsize & 0x1) == 0)
575 	{
576 	  info->qualifier = AARCH64_OPND_QLF_S_S;
577 	  /* Index encoded in "Q:S".  */
578 	  info->reglist.index = QSsize >> 2;
579 	}
580       else
581 	{
582 	  if (extract_field (FLD_S, code, 0))
583 	    /* UND */
584 	    return false;
585 	  info->qualifier = AARCH64_OPND_QLF_S_D;
586 	  /* Index encoded in "Q".  */
587 	  info->reglist.index = QSsize >> 3;
588 	}
589       break;
590     default:
591       return false;
592     }
593 
594   info->reglist.has_index = 1;
595   info->reglist.num_regs = 0;
596   info->reglist.stride = 1;
597   /* Number of registers is equal to the number of elements in
598      each structure to be loaded/stored.  */
599   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
600   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
601 
602   return true;
603 }
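/* Example (illustrative): for a byte element (opcode<2:1> == 0) the index is
   the full Q:S:size value, so LD1 {<Vt>.B}[15], [<Xn|SP>] is encoded with
   Q = 1, S = 1 and size = 3.  */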
604 
605 /* Decode fields immh:immb and/or Q for e.g.
606    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
607    or SSHR <V><d>, <V><n>, #<shift>.  */
608 
609 bool
610 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
611 			       aarch64_opnd_info *info, const aarch64_insn code,
612 			       const aarch64_inst *inst,
613 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
614 {
615   int pos;
616   aarch64_insn Q, imm, immh;
617   enum aarch64_insn_class iclass = inst->opcode->iclass;
618 
619   immh = extract_field (FLD_immh, code, 0);
620   if (immh == 0)
621     return false;
622   imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
623   pos = 4;
624   /* Get highest set bit in immh.  */
625   while (--pos >= 0 && (immh & 0x8) == 0)
626     immh <<= 1;
627 
628   assert ((iclass == asimdshf || iclass == asisdshf)
629 	  && (info->type == AARCH64_OPND_IMM_VLSR
630 	      || info->type == AARCH64_OPND_IMM_VLSL));
631 
632   if (iclass == asimdshf)
633     {
634       Q = extract_field (FLD_Q, code, 0);
635       /* immh	Q	<T>
636 	 0000	x	SEE AdvSIMD modified immediate
637 	 0001	0	8B
638 	 0001	1	16B
639 	 001x	0	4H
640 	 001x	1	8H
641 	 01xx	0	2S
642 	 01xx	1	4S
643 	 1xxx	0	RESERVED
644 	 1xxx	1	2D  */
645       info->qualifier =
646 	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
647     }
648   else
649     info->qualifier = get_sreg_qualifier_from_value (pos);
650 
651   if (info->type == AARCH64_OPND_IMM_VLSR)
652     /* immh	<shift>
653        0000	SEE AdvSIMD modified immediate
654        0001	(16-UInt(immh:immb))
655        001x	(32-UInt(immh:immb))
656        01xx	(64-UInt(immh:immb))
657        1xxx	(128-UInt(immh:immb))  */
658     info->imm.value = (16 << pos) - imm;
659   else
660     /* immh:immb
661        immh	<shift>
662        0000	SEE AdvSIMD modified immediate
663        0001	(UInt(immh:immb)-8)
664        001x	(UInt(immh:immb)-16)
665        01xx	(UInt(immh:immb)-32)
666        1xxx	(UInt(immh:immb)-64)  */
667     info->imm.value = imm - (8 << pos);
668 
669   return true;
670 }
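/* Worked example (illustrative): SSHR <Vd>.4H, <Vn>.4H, #<shift> has
   immh = 001x and Q = 0, so pos = 1 and the shift decodes as
   (16 << 1) - UInt(immh:immb) = 32 - UInt(immh:immb), matching the table
   above.  */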
671 
672 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
673 bool
674 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
675 		      aarch64_opnd_info *info, const aarch64_insn code,
676 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
677 		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
678 {
679   int64_t imm;
680   aarch64_insn val;
681   val = extract_field (FLD_size, code, 0);
682   switch (val)
683     {
684     case 0: imm = 8; break;
685     case 1: imm = 16; break;
686     case 2: imm = 32; break;
687     default: return false;
688     }
689   info->imm.value = imm;
690   return true;
691 }
692 
693 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
694    The value in the field(s) is extracted as an unsigned immediate value.  */
695 bool
696 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
697 		 const aarch64_insn code,
698 		 const aarch64_inst *inst,
699 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
700 {
701   uint64_t imm;
702 
703   imm = extract_all_fields (self, code);
704 
705   if (operand_need_sign_extension (self))
706     imm = sign_extend (imm, get_operand_fields_width (self) - 1);
707 
708   if (operand_need_shift_by_two (self))
709     imm <<= 2;
710   else if (operand_need_shift_by_three (self))
711     imm <<= 3;
712   else if (operand_need_shift_by_four (self))
713     imm <<= 4;
714 
715   if (info->type == AARCH64_OPND_ADDR_ADRP)
716     imm <<= 12;
717 
718   if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
719       && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
720     imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
721 
722   info->imm.value = imm;
723   return true;
724 }
725 
726 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
727 bool
728 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
729 		      const aarch64_insn code,
730 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
731 		      aarch64_operand_error *errors)
732 {
733   aarch64_ext_imm (self, info, code, inst, errors);
734   info->shifter.kind = AARCH64_MOD_LSL;
735   info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
736   return true;
737 }
738 
739 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
740      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
741 bool
742 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
743 				  aarch64_opnd_info *info,
744 				  const aarch64_insn code,
745 				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
746 				  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
747 {
748   uint64_t imm;
749   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
750   aarch64_field field = {0, 0};
751 
752   assert (info->idx == 1);
753 
754   if (info->type == AARCH64_OPND_SIMD_FPIMM)
755     info->imm.is_fp = 1;
756 
757   /* a:b:c:d:e:f:g:h */
758   imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
759   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
760     {
761       /* Either MOVI <Dd>, #<imm>
762 	 or     MOVI <Vd>.2D, #<imm>.
763 	 <imm> is a 64-bit immediate
764 	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
765 	 encoded in "a:b:c:d:e:f:g:h".	*/
766       int i;
767       unsigned abcdefgh = imm;
768       for (imm = 0ull, i = 0; i < 8; i++)
769 	if (((abcdefgh >> i) & 0x1) != 0)
770 	  imm |= 0xffull << (8 * i);
771     }
772   info->imm.value = imm;
773 
774   /* cmode */
775   info->qualifier = get_expected_qualifier (inst, info->idx);
776   switch (info->qualifier)
777     {
778     case AARCH64_OPND_QLF_NIL:
779       /* no shift */
780       info->shifter.kind = AARCH64_MOD_NONE;
781       return 1;
782     case AARCH64_OPND_QLF_LSL:
783       /* shift zeros */
784       info->shifter.kind = AARCH64_MOD_LSL;
785       switch (aarch64_get_qualifier_esize (opnd0_qualifier))
786 	{
787 	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
788 	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
789 	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
790 	default: return false;
791 	}
792       /* 00: 0; 01: 8; 10:16; 11:24.  */
793       info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
794       break;
795     case AARCH64_OPND_QLF_MSL:
796       /* shift ones */
797       info->shifter.kind = AARCH64_MOD_MSL;
798       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
799       info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
800       break;
801     default:
802       return false;
803     }
804 
805   return true;
806 }
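/* Worked example (illustrative): for MOVI <Vd>.2D, #<imm> each of the bits
   a:b:c:d:e:f:g:h expands to a whole byte, so abcdefgh = 0b10000001 yields
   the 64-bit immediate 0xff000000000000ff.  */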
807 
808 /* Decode an 8-bit floating-point immediate.  */
809 bool
810 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
811 		   const aarch64_insn code,
812 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
813 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
814 {
815   info->imm.value = extract_all_fields (self, code);
816   info->imm.is_fp = 1;
817   return true;
818 }
819 
820 /* Decode a 1-bit rotate immediate (#90 or #270).  */
821 bool
822 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
823 			 const aarch64_insn code,
824 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
825 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
826 {
827   uint64_t rot = extract_field (self->fields[0], code, 0);
828   assert (rot < 2U);
829   info->imm.value = rot * 180 + 90;
830   return true;
831 }
832 
833 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270).  */
834 bool
835 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
836 			 const aarch64_insn code,
837 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
838 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
839 {
840   uint64_t rot = extract_field (self->fields[0], code, 0);
841   assert (rot < 4U);
842   info->imm.value = rot * 90;
843   return true;
844 }
845 
846 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
847 bool
848 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
849 		   aarch64_opnd_info *info, const aarch64_insn code,
850 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
851 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
852 {
853   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
854   return true;
855 }
856 
857 /* Decode arithmetic immediate for e.g.
858      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
859 bool
860 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
861 		  aarch64_opnd_info *info, const aarch64_insn code,
862 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
863 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
864 {
865   aarch64_insn value;
866 
867   info->shifter.kind = AARCH64_MOD_LSL;
868   /* shift */
869   value = extract_field (FLD_shift, code, 0);
870   if (value >= 2)
871     return false;
872   info->shifter.amount = value ? 12 : 0;
873   /* imm12 (unsigned) */
874   info->imm.value = extract_field (FLD_imm12, code, 0);
875 
876   return true;
877 }
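/* Example (illustrative): ADD <Wd|WSP>, <Wn|WSP>, #0x123, LSL #12 is encoded
   with the shift field equal to 1 and imm12 = 0x123; shift field values 2
   and 3 are rejected above.  */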
878 
879 /* Return true if VALUE is a valid logical immediate encoding, storing the
880    decoded value in *RESULT if so.  ESIZE is the number of bytes in the
881    decoded immediate.  */
882 static bool
883 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
884 {
885   uint64_t imm, mask;
886   uint32_t N, R, S;
887   unsigned simd_size;
888 
889   /* value is N:immr:imms.  */
890   S = value & 0x3f;
891   R = (value >> 6) & 0x3f;
892   N = (value >> 12) & 0x1;
893 
894   /* The immediate value is S+1 bits set to 1, left-rotated by SIMDsize - R
895      (in other words, right-rotated by R), then replicated.  */
896   if (N != 0)
897     {
898       simd_size = 64;
899       mask = 0xffffffffffffffffull;
900     }
901   else
902     {
903       switch (S)
904 	{
905 	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
906 	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
907 	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
908 	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
909 	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
910 	default: return false;
911 	}
912       mask = (1ull << simd_size) - 1;
913       /* Top bits are IGNORED.  */
914       R &= simd_size - 1;
915     }
916 
917   if (simd_size > esize * 8)
918     return false;
919 
920   /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
921   if (S == simd_size - 1)
922     return false;
923   /* S+1 consecutive bits to 1.  */
924   /* NOTE: S can't be 63 due to detection above.  */
925   imm = (1ull << (S + 1)) - 1;
926   /* Rotate to the left by simd_size - R.  */
927   if (R != 0)
928     imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
929   /* Replicate the value according to SIMD size.  */
930   switch (simd_size)
931     {
932     case  2: imm = (imm <<  2) | imm;
933       /* Fall through.  */
934     case  4: imm = (imm <<  4) | imm;
935       /* Fall through.  */
936     case  8: imm = (imm <<  8) | imm;
937       /* Fall through.  */
938     case 16: imm = (imm << 16) | imm;
939       /* Fall through.  */
940     case 32: imm = (imm << 32) | imm;
941       /* Fall through.  */
942     case 64: break;
943     default: return 0;
944     }
945 
946   *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
947 
948   return true;
949 }
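/* Worked example (illustrative): for a 32-bit operand (esize == 4), the
   encoding N = 0, immr = 0, imms = 0b111100 selects a 2-bit element with a
   single bit set and no rotation; replication then gives 0x55555555.  */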
950 
951 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
952 bool
953 aarch64_ext_limm (const aarch64_operand *self,
954 		  aarch64_opnd_info *info, const aarch64_insn code,
955 		  const aarch64_inst *inst,
956 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
957 {
958   uint32_t esize;
959   aarch64_insn value;
960 
961   value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
962 			  self->fields[2]);
963   esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
964   return decode_limm (esize, value, &info->imm.value);
965 }
966 
967 /* Decode a logical immediate for the BIC alias of AND (etc.).  */
968 bool
969 aarch64_ext_inv_limm (const aarch64_operand *self,
970 		      aarch64_opnd_info *info, const aarch64_insn code,
971 		      const aarch64_inst *inst,
972 		      aarch64_operand_error *errors)
973 {
974   if (!aarch64_ext_limm (self, info, code, inst, errors))
975     return false;
976   info->imm.value = ~info->imm.value;
977   return true;
978 }
979 
980 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
981    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
982 bool
983 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
984 		aarch64_opnd_info *info,
985 		const aarch64_insn code, const aarch64_inst *inst,
986 		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
987 {
988   aarch64_insn value;
989 
990   /* Rt */
991   info->reg.regno = extract_field (FLD_Rt, code, 0);
992 
993   /* size */
994   value = extract_field (FLD_ldst_size, code, 0);
995   if (inst->opcode->iclass == ldstpair_indexed
996       || inst->opcode->iclass == ldstnapair_offs
997       || inst->opcode->iclass == ldstpair_off
998       || inst->opcode->iclass == loadlit)
999     {
1000       enum aarch64_opnd_qualifier qualifier;
1001       switch (value)
1002 	{
1003 	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1004 	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1005 	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
1006 	default: return false;
1007 	}
1008       info->qualifier = qualifier;
1009     }
1010   else
1011     {
1012       /* opc1:size */
1013       value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
1014       if (value > 0x4)
1015 	return false;
1016       info->qualifier = get_sreg_qualifier_from_value (value);
1017     }
1018 
1019   return true;
1020 }
1021 
1022 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
1023 bool
1024 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1025 			 aarch64_opnd_info *info,
1026 			 aarch64_insn code,
1027 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1028 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1029 {
1030   /* Rn */
1031   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1032   return true;
1033 }
1034 
1035 /* Decode the address operand for rcpc3 instructions with optional load/store
1036    datasize offset, e.g. STILP <Xs>, <Xt>, [<Xn|SP>{,#-16}]! and
1037    LDIAPP <Xs>, <Xt>, [<Xn|SP>]{,#-16}.  */
1038 bool
1039 aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1040 				   aarch64_opnd_info *info,
1041 				   aarch64_insn code,
1042 				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
1043 				   aarch64_operand_error *err ATTRIBUTE_UNUSED)
1044 {
1045   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1046   if (!extract_field (FLD_opc2, code, 0))
1047     {
1048       info->addr.writeback = 1;
1049 
1050       enum aarch64_opnd type;
1051       for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1052 	{
1053 	  aarch64_opnd_info opnd = info[i];
1054 	  type = opnd.type;
1055 	  if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
1056 	    break;
1057 	}
1058 
1059       assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
1060       int offset = calc_ldst_datasize (inst->operands);
1061 
1062       switch (type)
1063 	{
1064 	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
1065 	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
1066 	  info->addr.offset.imm = -offset;
1067 	  info->addr.preind = 1;
1068 	  break;
1069 	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
1070 	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
1071 	  info->addr.offset.imm = offset;
1072 	  info->addr.postind = 1;
1073 	  break;
1074 	default:
1075 	  return false;
1076 	}
1077     }
1078   return true;
1079 }
1080 
1081 bool
1082 aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1083 			       aarch64_opnd_info *info,
1084 			       aarch64_insn code,
1085 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1086 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1087 {
1088   info->qualifier = get_expected_qualifier (inst, info->idx);
1089 
1090   /* Rn */
1091   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1092 
1093   /* simm9 */
1094   aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1095   info->addr.offset.imm = sign_extend (imm, 8);
1096   return true;
1097 }
1098 
1099 /* Decode the address operand for e.g.
1100      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
1101 bool
1102 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1103 			 aarch64_opnd_info *info,
1104 			 aarch64_insn code, const aarch64_inst *inst,
1105 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1106 {
1107   info->qualifier = get_expected_qualifier (inst, info->idx);
1108 
1109   /* Rn */
1110   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1111 
1112   /* simm9 */
1113   aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1114   info->addr.offset.imm = sign_extend (imm, 8);
1115   if (extract_field (self->fields[2], code, 0) == 1) {
1116     info->addr.writeback = 1;
1117     info->addr.preind = 1;
1118   }
1119   return true;
1120 }
1121 
1122 /* Decode the address operand for e.g.
1123      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1124 bool
1125 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1126 			 aarch64_opnd_info *info,
1127 			 aarch64_insn code, const aarch64_inst *inst,
1128 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1129 {
1130   aarch64_insn S, value;
1131 
1132   /* Rn */
1133   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1134   /* Rm */
1135   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1136   /* option */
1137   value = extract_field (FLD_option, code, 0);
1138   info->shifter.kind =
1139     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1140   /* Fix-up the shifter kind; although the table-driven approach is
1141      efficient, it is slightly inflexible, thus needing this fix-up.  */
1142   if (info->shifter.kind == AARCH64_MOD_UXTX)
1143     info->shifter.kind = AARCH64_MOD_LSL;
1144   /* S */
1145   S = extract_field (FLD_S, code, 0);
1146   if (S == 0)
1147     {
1148       info->shifter.amount = 0;
1149       info->shifter.amount_present = 0;
1150     }
1151   else
1152     {
1153       int size;
1154       /* Need information in other operand(s) to help achieve the decoding
1155 	 from 'S' field.  */
1156       info->qualifier = get_expected_qualifier (inst, info->idx);
1157       /* Get the size of the data element that is accessed, which may be
1158 	 different from that of the source register size, e.g. in strb/ldrb.  */
1159       size = aarch64_get_qualifier_esize (info->qualifier);
1160       info->shifter.amount = get_logsz (size);
1161       info->shifter.amount_present = 1;
1162     }
1163 
1164   return true;
1165 }
1166 
1167 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
1168 bool
1169 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1170 		       aarch64_insn code, const aarch64_inst *inst,
1171 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1172 {
1173   aarch64_insn imm;
1174   info->qualifier = get_expected_qualifier (inst, info->idx);
1175 
1176   /* Rn */
1177   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1178   /* simm (imm9 or imm7)  */
1179   imm = extract_field (self->fields[0], code, 0);
1180   info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1181   if (self->fields[0] == FLD_imm7
1182       || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1183     /* scaled immediate in ld/st pair instructions.  */
1184     info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1185   /* qualifier */
1186   if (inst->opcode->iclass == ldst_unscaled
1187       || inst->opcode->iclass == ldstnapair_offs
1188       || inst->opcode->iclass == ldstpair_off
1189       || inst->opcode->iclass == ldst_unpriv)
1190     info->addr.writeback = 0;
1191   else
1192     {
1193       /* pre/post- index */
1194       info->addr.writeback = 1;
1195       if (extract_field (self->fields[1], code, 0) == 1)
1196 	info->addr.preind = 1;
1197       else
1198 	info->addr.postind = 1;
1199     }
1200 
1201   return true;
1202 }
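/* Example (illustrative): for a 64-bit LDP <Xt1>, <Xt2>, [<Xn|SP>, #16] the
   imm7 field holds 2; the scaling above multiplies it by the 8-byte element
   size to recover the byte offset 16.  */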
1203 
1204 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
1205 bool
1206 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1207 			 aarch64_insn code,
1208 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1209 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1210 {
1211   int shift;
1212   info->qualifier = get_expected_qualifier (inst, info->idx);
1213   shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1214   /* Rn */
1215   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1216   /* uimm12 */
1217   info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1218   return true;
1219 }
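/* Example (illustrative): LDR <Xt>, [<Xn|SP>, #8] has an 8-byte element
   size, so shift = 3 and the encoded uimm12 is 1.  */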
1220 
1221 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
1222 bool
1223 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1224 			 aarch64_insn code,
1225 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1226 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1227 {
1228   aarch64_insn imm;
1229 
1230   info->qualifier = get_expected_qualifier (inst, info->idx);
1231   /* Rn */
1232   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1233   /* simm10 */
1234   imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1235   info->addr.offset.imm = sign_extend (imm, 9) << 3;
1236   if (extract_field (self->fields[3], code, 0) == 1) {
1237     info->addr.writeback = 1;
1238     info->addr.preind = 1;
1239   }
1240   return true;
1241 }
1242 
1243 /* Decode the address operand for e.g.
1244      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
1245 bool
1246 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1247 			    aarch64_opnd_info *info,
1248 			    aarch64_insn code, const aarch64_inst *inst,
1249 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1250 {
1251   /* The opcode dependent area stores the number of elements in
1252      each structure to be loaded/stored.  */
1253   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1254 
1255   /* Rn */
1256   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1257   /* Rm | #<amount>  */
1258   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1259   if (info->addr.offset.regno == 31)
1260     {
1261       if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1262 	/* Special handling of loading a single structure to all lanes.  */
1263 	info->addr.offset.imm = (is_ld1r ? 1
1264 				 : inst->operands[0].reglist.num_regs)
1265 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1266       else
1267 	info->addr.offset.imm = inst->operands[0].reglist.num_regs
1268 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1269 	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1270     }
1271   else
1272     info->addr.offset.is_reg = 1;
1273   info->addr.writeback = 1;
1274 
1275   return true;
1276 }
1277 
1278 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
1279 bool
1280 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1281 		  aarch64_opnd_info *info,
1282 		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1283 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1284 {
1285   aarch64_insn value;
1286   /* cond */
1287   value = extract_field (FLD_cond, code, 0);
1288   info->cond = get_cond_from_value (value);
1289   return true;
1290 }
1291 
1292 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
1293 bool
1294 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1295 		    aarch64_opnd_info *info,
1296 		    aarch64_insn code,
1297 		    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1298 		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1299 {
1300   /* op0:op1:CRn:CRm:op2 */
1301   info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1302 				       FLD_CRm, FLD_op2);
1303   info->sysreg.flags = 0;
1304 
1305   /* For a system instruction, check which restrictions apply to the register
1306      value during decoding; these will be enforced later.  */
1307   if (inst->opcode->iclass == ic_system)
1308     {
1309       /* Check whether it is read-only, else check whether it is write-only.
1310 	 If it is both or unspecified, don't care.  */
1311       if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1312 	info->sysreg.flags = F_REG_READ;
1313       else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1314 	       == F_SYS_WRITE)
1315 	info->sysreg.flags = F_REG_WRITE;
1316     }
1317 
1318   return true;
1319 }
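/* Illustrative note (assuming the usual 2/3/4/4/3-bit widths of the
   op0/op1/CRn/CRm/op2 fields): the packed value above is

     (op0 << 14) | (op1 << 11) | (CRn << 7) | (CRm << 3) | op2.  */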
1320 
1321 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
1322 bool
1323 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1324 			 aarch64_opnd_info *info, aarch64_insn code,
1325 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1326 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1327 {
1328   int i;
1329   aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
1330   /* op1:op2 */
1331   info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1332   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1333     if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1334       {
1335         /* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
1336         uint32_t flags = aarch64_pstatefields[i].flags;
1337         if ((flags & F_REG_IN_CRM)
1338             && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
1339           continue;
1340         info->sysreg.flags = flags;
1341         return true;
1342       }
1343   /* Reserved value in <pstatefield>.  */
1344   return false;
1345 }
1346 
1347 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
1348 bool
1349 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1350 		       aarch64_opnd_info *info,
1351 		       aarch64_insn code,
1352 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1353 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1354 {
1355   int i;
1356   aarch64_insn value;
1357   const aarch64_sys_ins_reg *sysins_ops;
1358   /* op0:op1:CRn:CRm:op2 */
1359   value = extract_fields (code, 0, 5,
1360 			  FLD_op0, FLD_op1, FLD_CRn,
1361 			  FLD_CRm, FLD_op2);
1362 
1363   switch (info->type)
1364     {
1365     case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1366     case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1367     case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1368     case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1369     case AARCH64_OPND_SYSREG_TLBIP: sysins_ops = aarch64_sys_regs_tlbi; break;
1370     case AARCH64_OPND_SYSREG_SR:
1371 	sysins_ops = aarch64_sys_regs_sr;
1372 	 /* Let's remove op2 for rctx.  Refer to comments in the definition of
1373 	    aarch64_sys_regs_sr[].  */
1374 	value = value & ~(0x7);
1375 	break;
1376     default: return false;
1377     }
1378 
1379   for (i = 0; sysins_ops[i].name != NULL; ++i)
1380     if (sysins_ops[i].value == value)
1381       {
1382 	info->sysins_op = sysins_ops + i;
1383 	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1384 		     info->sysins_op->name,
1385 		     (unsigned)info->sysins_op->value,
1386 		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1387 	return true;
1388       }
1389 
1390   return false;
1391 }
1392 
1393 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
1394 
1395 bool
1396 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1397 		     aarch64_opnd_info *info,
1398 		     aarch64_insn code,
1399 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1400 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1401 {
1402   /* CRm */
1403   info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1404   return true;
1405 }
1406 
1407 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>.  */
1408 
1409 bool
1410 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1411 		     aarch64_opnd_info *info,
1412 		     aarch64_insn code,
1413 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1414 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1415 {
1416   /* For the DSB nXS barrier variant the immediate is encoded in a 2-bit field.  */
1417   aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1418   info->barrier = aarch64_barrier_dsb_nxs_options + field;
1419   return true;
1420 }
1421 
1422 /* Decode the prefetch operation option operand for e.g.
1423      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
1424 
1425 bool
1426 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1427 		   aarch64_opnd_info *info,
1428 		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1429 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1430 {
1431   /* prfop in Rt */
1432   info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1433   return true;
1434 }
1435 
1436 /* Decode the hint number for an alias taking an operand.  Set info->hint_option
1437    to the matching name/value pair in aarch64_hint_options.  */
1438 
1439 bool
1440 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1441 		  aarch64_opnd_info *info,
1442 		  aarch64_insn code,
1443 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1444 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1445 {
1446   /* CRm:op2.  */
1447   unsigned hint_number;
1448   int i;
1449 
1450   hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1451 
1452   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1453     {
1454       if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1455 	{
1456 	  info->hint_option = &(aarch64_hint_options[i]);
1457 	  return true;
1458 	}
1459     }
1460 
1461   return false;
1462 }
1463 
1464 /* Decode the extended register operand for e.g.
1465      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1466 bool
1467 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1468 			  aarch64_opnd_info *info,
1469 			  aarch64_insn code,
1470 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1471 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1472 {
1473   aarch64_insn value;
1474 
1475   /* Rm */
1476   info->reg.regno = extract_field (FLD_Rm, code, 0);
1477   /* option */
1478   value = extract_field (FLD_option, code, 0);
1479   info->shifter.kind =
1480     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1481   /* imm3 */
1482   info->shifter.amount = extract_field (FLD_imm3_10, code,  0);
1483 
1484   /* This makes the constraint checking happy.  */
1485   info->shifter.operator_present = 1;
1486 
1487   /* Assume inst->operands[0].qualifier has been resolved.  */
1488   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1489   info->qualifier = AARCH64_OPND_QLF_W;
1490   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1491       && (info->shifter.kind == AARCH64_MOD_UXTX
1492 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1493     info->qualifier = AARCH64_OPND_QLF_X;
1494 
1495   return true;
1496 }
1497 
1498 /* Decode the shifted register operand for e.g.
1499      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1500 bool
1501 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1502 			 aarch64_opnd_info *info,
1503 			 aarch64_insn code,
1504 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1505 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1506 {
1507   aarch64_insn value;
1508 
1509   /* Rm */
1510   info->reg.regno = extract_field (FLD_Rm, code, 0);
1511   /* shift */
1512   value = extract_field (FLD_shift, code, 0);
1513   info->shifter.kind =
1514     aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1515   if (info->shifter.kind == AARCH64_MOD_ROR
1516       && inst->opcode->iclass != log_shift)
1517     /* ROR is not available for the shifted register operand in arithmetic
1518        instructions.  */
1519     return false;
1520   /* imm6 */
1521   info->shifter.amount = extract_field (FLD_imm6_10, code,  0);
1522 
1523   /* This makes the constraint checking happy.  */
1524   info->shifter.operator_present = 1;
1525 
1526   return true;
1527 }
1528 
1529 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1530    where <offset> is given by the OFFSET parameter and where <factor> is
1531    1 plus SELF's operand-dependent value.  fields[0] specifies the field
1532    that holds <base>.  */
1533 static bool
1534 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1535 				 aarch64_opnd_info *info, aarch64_insn code,
1536 				 int64_t offset)
1537 {
1538   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1539   info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1540   info->addr.offset.is_reg = false;
1541   info->addr.writeback = false;
1542   info->addr.preind = true;
1543   if (offset != 0)
1544     info->shifter.kind = AARCH64_MOD_MUL_VL;
1545   info->shifter.amount = 1;
1546   info->shifter.operator_present = (info->addr.offset.imm != 0);
1547   info->shifter.amount_present = false;
1548   return true;
1549 }
1550 
1551 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1552    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1553    SELF's operand-dependent value.  fields[0] specifies the field that
1554    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
1555 bool
1556 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1557 			       aarch64_opnd_info *info, aarch64_insn code,
1558 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1559 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1560 {
1561   int offset;
1562 
1563   offset = extract_field (FLD_SVE_imm4, code, 0);
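  /* Sign-extend the 4-bit field: ((x + 8) & 15) - 8 maps 0..15 to -8..7,
     e.g. 0b1111 -> -1.  */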
1564   offset = ((offset + 8) & 15) - 8;
1565   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1566 }
1567 
1568 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1569    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1570    SELF's operand-dependent value.  fields[0] specifies the field that
1571    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
1572 bool
1573 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1574 			       aarch64_opnd_info *info, aarch64_insn code,
1575 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1576 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1577 {
1578   int offset;
1579 
1580   offset = extract_field (FLD_SVE_imm6, code, 0);
1581   offset = (((offset + 32) & 63) - 32);
1582   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1583 }
1584 
1585 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1586    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1587    SELF's operand-dependent value.  fields[0] specifies the field that
1588    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
1589    and imm3 fields, with imm3 being the less-significant part.  */
1590 bool
1591 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1592 			       aarch64_opnd_info *info,
1593 			       aarch64_insn code,
1594 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1595 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1596 {
1597   int offset;
1598 
1599   offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1600   offset = (((offset + 256) & 511) - 256);
1601   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1602 }
1603 
1604 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1605    is given by the OFFSET parameter and where <shift> is SELF's operand-
1606    dependent value.  fields[0] specifies the base register field <base>.  */
1607 static bool
1608 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1609 			      aarch64_opnd_info *info, aarch64_insn code,
1610 			      int64_t offset)
1611 {
1612   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1613   info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1614   info->addr.offset.is_reg = false;
1615   info->addr.writeback = false;
1616   info->addr.preind = true;
1617   info->shifter.operator_present = false;
1618   info->shifter.amount_present = false;
1619   return true;
1620 }
1621 
1622 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1623    is a 4-bit signed number and where <shift> is SELF's operand-dependent
1624    value.  fields[0] specifies the base register field.  */
1625 bool
1626 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1627 			    aarch64_opnd_info *info, aarch64_insn code,
1628 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1629 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1630 {
1631   int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1632   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1633 }
1634 
1635 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1636    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1637    value.  fields[0] specifies the base register field.  */
1638 bool
1639 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1640 			    aarch64_opnd_info *info, aarch64_insn code,
1641 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1642 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1643 {
1644   int offset = extract_field (FLD_SVE_imm6, code, 0);
1645   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1646 }
1647 
1648 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1649    is SELF's operand-dependent value.  fields[0] specifies the base
1650    register field and fields[1] specifies the offset register field.  */
1651 bool
1652 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1653 			     aarch64_opnd_info *info, aarch64_insn code,
1654 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1655 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1656 {
1657   int index_regno;
1658 
1659   index_regno = extract_field (self->fields[1], code, 0);
1660   if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1661     return false;
1662 
1663   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1664   info->addr.offset.regno = index_regno;
1665   info->addr.offset.is_reg = true;
1666   info->addr.writeback = false;
1667   info->addr.preind = true;
1668   info->shifter.kind = AARCH64_MOD_LSL;
1669   info->shifter.amount = get_operand_specific_data (self);
1670   info->shifter.operator_present = (info->shifter.amount != 0);
1671   info->shifter.amount_present = (info->shifter.amount != 0);
1672   return true;
1673 }
1674 
1675 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1676    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1677    base register field, fields[1] specifies the offset register field and
1678    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1679 bool
1680 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1681 			     aarch64_opnd_info *info, aarch64_insn code,
1682 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1683 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1684 {
1685   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1686   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1687   info->addr.offset.is_reg = true;
1688   info->addr.writeback = false;
1689   info->addr.preind = true;
1690   if (extract_field (self->fields[2], code, 0))
1691     info->shifter.kind = AARCH64_MOD_SXTW;
1692   else
1693     info->shifter.kind = AARCH64_MOD_UXTW;
1694   info->shifter.amount = get_operand_specific_data (self);
1695   info->shifter.operator_present = true;
1696   info->shifter.amount_present = (info->shifter.amount != 0);
1697   return true;
1698 }
1699 
1700 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1701    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1702    fields[0] specifies the base register field.  */
1703 bool
1704 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1705 			    aarch64_opnd_info *info, aarch64_insn code,
1706 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1707 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1708 {
1709   int offset = extract_field (FLD_imm5, code, 0);
1710   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1711 }
1712 
1713 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1714    where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1715    number.  fields[0] specifies the base register field and fields[1]
1716    specifies the offset register field.  */
1717 static bool
1718 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1719 			 aarch64_insn code, enum aarch64_modifier_kind kind)
1720 {
1721   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1722   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1723   info->addr.offset.is_reg = true;
1724   info->addr.writeback = false;
1725   info->addr.preind = true;
1726   info->shifter.kind = kind;
1727   info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1728   info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1729 				    || info->shifter.amount != 0);
1730   info->shifter.amount_present = (info->shifter.amount != 0);
1731   return true;
1732 }
1733 
1734 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1735    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1736    field and fields[1] specifies the offset register field.  */
1737 bool
1738 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1739 			     aarch64_opnd_info *info, aarch64_insn code,
1740 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1741 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1742 {
1743   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1744 }
1745 
1746 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1747    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1748    field and fields[1] specifies the offset register field.  */
1749 bool
1750 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1751 			      aarch64_opnd_info *info, aarch64_insn code,
1752 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1753 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1754 {
1755   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1756 }
1757 
1758 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1759    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1760    field and fields[1] specifies the offset register field.  */
1761 bool
1762 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1763 			      aarch64_opnd_info *info, aarch64_insn code,
1764 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1765 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1766 {
1767   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1768 }
1769 
1770 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1771    has the raw field value and that the low 8 bits decode to VALUE.  */
1772 static bool
1773 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1774 {
1775   info->shifter.kind = AARCH64_MOD_LSL;
1776   info->shifter.amount = 0;
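  /* Bit 8 of the raw field selects an LSL #8 shift: e.g. a raw value of
     0x101 decodes to #256, while 0x100 itself is shown as #0, LSL #8.  */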
1777   if (info->imm.value & 0x100)
1778     {
1779       if (value == 0)
1780 	/* Decode 0x100 as #0, LSL #8.  */
1781 	info->shifter.amount = 8;
1782       else
1783 	value *= 256;
1784     }
1785   info->shifter.operator_present = (info->shifter.amount != 0);
1786   info->shifter.amount_present = (info->shifter.amount != 0);
1787   info->imm.value = value;
1788   return true;
1789 }
1790 
1791 /* Decode an SVE ADD/SUB immediate.  */
1792 bool
1793 aarch64_ext_sve_aimm (const aarch64_operand *self,
1794 		      aarch64_opnd_info *info, const aarch64_insn code,
1795 		      const aarch64_inst *inst,
1796 		      aarch64_operand_error *errors)
1797 {
1798   return (aarch64_ext_imm (self, info, code, inst, errors)
1799 	  && decode_sve_aimm (info, (uint8_t) info->imm.value));
1800 }
1801 
1802 bool
1803 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1804 				 aarch64_opnd_info *info, aarch64_insn code,
1805 				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1806 				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1807 {
1808   unsigned int num_regs = get_operand_specific_data (self);
1809   unsigned int val = extract_field (self->fields[0], code, 0);
1810   info->reglist.first_regno = val * num_regs;
1811   info->reglist.num_regs = num_regs;
1812   info->reglist.stride = 1;
1813   return true;
1814 }
1815 
1816 /* Decode an SVE CPY/DUP immediate.  */
1817 bool
1818 aarch64_ext_sve_asimm (const aarch64_operand *self,
1819 		       aarch64_opnd_info *info, const aarch64_insn code,
1820 		       const aarch64_inst *inst,
1821 		       aarch64_operand_error *errors)
1822 {
1823   return (aarch64_ext_imm (self, info, code, inst, errors)
1824 	  && decode_sve_aimm (info, (int8_t) info->imm.value));
1825 }
1826 
1827 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1828    The fields array specifies which field to use.  */
1829 bool
1830 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1831 				aarch64_opnd_info *info, aarch64_insn code,
1832 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1833 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1834 {
1835   if (extract_field (self->fields[0], code, 0))
1836     info->imm.value = 0x3f800000;
1837   else
1838     info->imm.value = 0x3f000000;
1839   info->imm.is_fp = true;
1840   return true;
1841 }
1842 
1843 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1844    The fields array specifies which field to use.  */
1845 bool
1846 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1847 				aarch64_opnd_info *info, aarch64_insn code,
1848 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1849 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1850 {
1851   if (extract_field (self->fields[0], code, 0))
1852     info->imm.value = 0x40000000;
1853   else
1854     info->imm.value = 0x3f000000;
1855   info->imm.is_fp = true;
1856   return true;
1857 }
1858 
1859 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1860    The fields array specifies which field to use.  */
1861 bool
1862 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1863 				aarch64_opnd_info *info, aarch64_insn code,
1864 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1865 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1866 {
1867   if (extract_field (self->fields[0], code, 0))
1868     info->imm.value = 0x3f800000;
1869   else
1870     info->imm.value = 0x0;
1871   info->imm.is_fp = true;
1872   return true;
1873 }
1874 
1875 /* Decode the ZA tile vector, vector indicator, vector selector, qualifier and
1876    immediate fields of numerous SME instructions such as MOVA.  */
1877 bool
1878 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
1879                              aarch64_opnd_info *info, aarch64_insn code,
1880                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
1881                              aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1882 {
1883   int fld_size = extract_field (self->fields[0], code, 0);
1884   int fld_q = extract_field (self->fields[1], code, 0);
1885   int fld_v = extract_field (self->fields[2], code, 0);
1886   int fld_rv = extract_field (self->fields[3], code, 0);
1887   int fld_zan_imm = extract_field (self->fields[4], code, 0);
1888 
1889   /* Deduce qualifier encoded in size and Q fields.  */
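  /* The wider the element, the more tiles there are and the fewer slice
     index bits remain: the tile number takes the high bits of the ZAn/imm
     field and the slice index the low bits; 128-bit elements (size == 3
     with Q set) use the whole field for the tile number.  */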
1890   if (fld_size == 0)
1891     {
1892       info->indexed_za.regno = 0;
1893       info->indexed_za.index.imm = fld_zan_imm;
1894     }
1895   else if (fld_size == 1)
1896     {
1897       info->indexed_za.regno = fld_zan_imm >> 3;
1898       info->indexed_za.index.imm = fld_zan_imm & 0x07;
1899     }
1900   else if (fld_size == 2)
1901     {
1902       info->indexed_za.regno = fld_zan_imm >> 2;
1903       info->indexed_za.index.imm = fld_zan_imm & 0x03;
1904     }
1905   else if (fld_size == 3 && fld_q == 0)
1906     {
1907       info->indexed_za.regno = fld_zan_imm >> 1;
1908       info->indexed_za.index.imm = fld_zan_imm & 0x01;
1909     }
1910   else if (fld_size == 3 && fld_q == 1)
1911     {
1912       info->indexed_za.regno = fld_zan_imm;
1913       info->indexed_za.index.imm = 0;
1914     }
1915   else
1916     return false;
1917 
1918   info->indexed_za.index.regno = fld_rv + 12;
1919   info->indexed_za.v = fld_v;
1920 
1921   return true;
1922 }
1923 
1924 bool
1925 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
1926 				   aarch64_opnd_info *info, aarch64_insn code,
1927 				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
1928 				   aarch64_operand_error *errors
1929 				     ATTRIBUTE_UNUSED)
1930 {
1931   int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1932   int range_size = get_opcode_dependent_value (inst->opcode);
1933   int fld_v = extract_field (self->fields[0], code, 0);
1934   int fld_rv = extract_field (self->fields[1], code, 0);
1935   int fld_zan_imm = extract_field (self->fields[2], code, 0);
1936   int max_value = 16 / range_size / ebytes;
1937 
1938   if (max_value == 0)
1939     max_value = 1;
1940 
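  /* The immediate encodes tile number * max_value + (first slice /
     range_size).  */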
1941   int regno = fld_zan_imm / max_value;
1942   if (regno >= ebytes)
1943     return false;
1944 
1945   info->indexed_za.regno = regno;
1946   info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
1947   info->indexed_za.index.countm1 = range_size - 1;
1948   info->indexed_za.index.regno = fld_rv + 12;
1949   info->indexed_za.v = fld_v;
1950 
1951   return true;
1952 }
1953 
1954 /* Decode, in the SME instruction ZERO, a list of up to eight 64-bit element
1955    tile names separated by commas, encoded in the "imm8" field.
1956 
1957    For programmer convenience, an assembler must also accept the names of
1958    32-bit, 16-bit and 8-bit element tiles, which are converted into the
1959    corresponding set of 64-bit element tiles.
1960 */
1961 bool
1962 aarch64_ext_sme_za_list (const aarch64_operand *self,
1963                          aarch64_opnd_info *info, aarch64_insn code,
1964                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
1965                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1966 {
1967   int mask = extract_field (self->fields[0], code, 0);
1968   info->imm.value = mask;
1969   return true;
1970 }
1971 
1972 /* Decode the ZA array vector select register (Rv field) and the optional
1973    vector/memory offset (imm4_11 field).
1974 */
1975 bool
1976 aarch64_ext_sme_za_array (const aarch64_operand *self,
1977                           aarch64_opnd_info *info, aarch64_insn code,
1978                           const aarch64_inst *inst,
1979                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1980 {
1981   int regno = extract_field (self->fields[0], code, 0);
1982   if (info->type == AARCH64_OPND_SME_ZA_array_off4)
1983     regno += 12;
1984   else
1985     regno += 8;
1986   int imm = extract_field (self->fields[1], code, 0);
1987   int num_offsets = get_operand_specific_data (self);
1988   if (num_offsets == 0)
1989     num_offsets = 1;
1990   info->indexed_za.index.regno = regno;
1991   info->indexed_za.index.imm = imm * num_offsets;
1992   info->indexed_za.index.countm1 = num_offsets - 1;
1993   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
1994   return true;
1995 }
1996 
1997 /* Decode two ZA tile slice (V, Rv, off3 | ZAn, off2 | ZAn, ol | ZAn) fields.  */
1998 bool
1999 aarch64_ext_sme_za_vrs1 (const aarch64_operand *self,
2000 			  aarch64_opnd_info *info, aarch64_insn code,
2001 			  const aarch64_inst *inst,
2002 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2003 {
2004   int v = extract_field (self->fields[0], code, 0);
2005   int regno = 12 + extract_field (self->fields[1], code, 0);
2006   int imm, za_reg, num_offset = 2;
2007 
2008   switch (info->qualifier)
2009     {
2010     case AARCH64_OPND_QLF_S_B:
2011       imm = extract_field (self->fields[2], code, 0);
2012       info->indexed_za.index.imm = imm * num_offset;
2013       break;
2014     case AARCH64_OPND_QLF_S_H:
2015     case AARCH64_OPND_QLF_S_S:
2016       za_reg = extract_field (self->fields[2], code, 0);
2017       imm = extract_field (self->fields[3], code, 0);
2018       info->indexed_za.index.imm = imm * num_offset;
2019       info->indexed_za.regno = za_reg;
2020       break;
2021     case AARCH64_OPND_QLF_S_D:
2022       za_reg = extract_field (self->fields[2], code, 0);
2023       info->indexed_za.regno = za_reg;
2024       break;
2025     default:
2026       return false;
2027     }
2028 
2029   info->indexed_za.index.regno = regno;
2030   info->indexed_za.index.countm1 = num_offset - 1;
2031   info->indexed_za.v = v;
2032   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2033   return true;
2034 }
2035 
2036 /* Decode four ZA tile slice (V, Rv, off3 | ZAn, off2 | ZAn, ol | ZAn) fields.  */
2037 bool
2038 aarch64_ext_sme_za_vrs2 (const aarch64_operand *self,
2039 			  aarch64_opnd_info *info, aarch64_insn code,
2040 			  const aarch64_inst *inst,
2041 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2042 {
2043   int v = extract_field (self->fields[0], code, 0);
2044   int regno = 12 + extract_field (self->fields[1], code, 0);
2045   int imm, za_reg, num_offset = 4;
2046 
2047   switch (info->qualifier)
2048     {
2049     case AARCH64_OPND_QLF_S_B:
2050       imm = extract_field (self->fields[2], code, 0);
2051       info->indexed_za.index.imm = imm * num_offset;
2052       break;
2053     case AARCH64_OPND_QLF_S_H:
2054       za_reg = extract_field (self->fields[2], code, 0);
2055       imm = extract_field (self->fields[3], code, 0);
2056       info->indexed_za.index.imm = imm * num_offset;
2057       info->indexed_za.regno = za_reg;
2058       break;
2059     case AARCH64_OPND_QLF_S_S:
2060     case AARCH64_OPND_QLF_S_D:
2061       za_reg = extract_field (self->fields[2], code, 0);
2062       info->indexed_za.regno = za_reg;
2063       break;
2064     default:
2065       return false;
2066     }
2067 
2068   info->indexed_za.index.regno = regno;
2069   info->indexed_za.index.countm1 = num_offset - 1;
2070   info->indexed_za.v = v;
2071   info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2072   return true;
2073 }
2074 
2075 bool
2076 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
2077                                aarch64_opnd_info *info, aarch64_insn code,
2078                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
2079                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2080 {
2081   int regno = extract_field (self->fields[0], code, 0);
2082   int imm = extract_field (self->fields[1], code, 0);
2083   info->addr.base_regno = regno;
2084   info->addr.offset.imm = imm;
2085   /* MUL VL operator is always present for this operand.  */
2086   info->shifter.kind = AARCH64_MOD_MUL_VL;
2087   info->shifter.operator_present = (imm != 0);
2088   return true;
2089 }
2090 
2091 /* Decode the {SM|ZA} field for SMSTART and SMSTOP instructions.  */
2092 bool
2093 aarch64_ext_sme_sm_za (const aarch64_operand *self,
2094                        aarch64_opnd_info *info, aarch64_insn code,
2095                        const aarch64_inst *inst ATTRIBUTE_UNUSED,
2096                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2097 {
2098   info->pstatefield = 0x1b;
2099   aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
2100   fld_crm >>= 1;    /* CRm[3:1].  */
2101 
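  /* CRm[3:1] == 1 selects SM and == 2 selects ZA; record the choice as a
     character tag in the register number field.  */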
2102   if (fld_crm == 0x1)
2103     info->reg.regno = 's';
2104   else if (fld_crm == 0x2)
2105     info->reg.regno = 'z';
2106   else
2107     return false;
2108 
2109   return true;
2110 }
2111 
2112 bool
2113 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
2114 				     aarch64_opnd_info *info, aarch64_insn code,
2115 				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
2116 				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2117 {
2118   aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
2119   aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
2120   aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
2121   aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
2122   aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
2123   int imm;
2124 
2125   info->indexed_za.regno = fld_pn;
2126   info->indexed_za.index.regno = fld_rm + 12;
2127 
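  /* The position of the lowest set bit in tszh:tszl gives the element size;
     the bits above it, together with i1, form the index.  */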
2128   if (fld_tszl & 0x1)
2129     imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
2130   else if (fld_tszl & 0x2)
2131     imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
2132   else if (fld_tszl & 0x4)
2133     imm = (fld_i1 << 1) | fld_tszh;
2134   else if (fld_tszh)
2135     imm = fld_i1;
2136   else
2137     return false;
2138 
2139   info->indexed_za.index.imm = imm;
2140   return true;
2141 }
2142 
2143 /* Decode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
2144    array specifies which field to use for Zn.  MM is encoded in the
2145    concatenation of imm5 and SVE_tszh, with imm5 being the less
2146    significant part.  */
2147 bool
2148 aarch64_ext_sve_index (const aarch64_operand *self,
2149 		       aarch64_opnd_info *info, aarch64_insn code,
2150 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
2151 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2152 {
2153   int val;
2154 
2155   info->reglane.regno = extract_field (self->fields[0], code, 0);
2156   val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
2157   if ((val & 31) == 0)
2158     return 0;
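  /* The element size is given by the position of the lowest set bit in
     tszh:imm5 and the index by the bits above it, e.g. 0b01011 has its
     lowest set bit at position 0 (byte elements) and decodes to index 5.  */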
2159   while ((val & 1) == 0)
2160     val /= 2;
2161   info->reglane.index = val / 2;
2162   return true;
2163 }
2164 
2165 /* Decode Zn.<T>[<imm>], where <imm> is an immediate in the range 0 to one less
2166    than the number of elements in 128 bits, encoded in il:tsz.  */
2167 bool
2168 aarch64_ext_sve_index_imm (const aarch64_operand *self,
2169 			   aarch64_opnd_info *info, aarch64_insn code,
2170 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
2171 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2172 {
2173   int val;
2174 
2175   info->reglane.regno = extract_field (self->fields[0], code, 0);
2176   val = extract_fields (code, 0, 2, self->fields[2], self->fields[1]);
2177   if ((val & 15) == 0)
2178     return 0;
2179   while ((val & 1) == 0)
2180     val /= 2;
2181   info->reglane.index = val / 2;
2182   return true;
2183 }
2184 
2185 /* Decode a logical immediate for the MOV alias of SVE DUPM.  */
2186 bool
2187 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
2188 			  aarch64_opnd_info *info, const aarch64_insn code,
2189 			  const aarch64_inst *inst,
2190 			  aarch64_operand_error *errors)
2191 {
2192   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
2193   return (aarch64_ext_limm (self, info, code, inst, errors)
2194 	  && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
2195 }
2196 
2197 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
2198    and where MM occupies the most-significant part.  The operand-dependent
2199    value specifies the number of bits in Zn.  */
2200 bool
2201 aarch64_ext_sve_quad_index (const aarch64_operand *self,
2202 			    aarch64_opnd_info *info, aarch64_insn code,
2203 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
2204 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2205 {
2206   unsigned int reg_bits = get_operand_specific_data (self);
2207   unsigned int val = extract_all_fields (self, code);
2208   info->reglane.regno = val & ((1 << reg_bits) - 1);
2209   info->reglane.index = val >> reg_bits;
2210   return true;
2211 }
2212 
2213 /* Decode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
2214    to use for Zn.  The opcode-dependent value specifies the number
2215    of registers in the list.  */
2216 bool
2217 aarch64_ext_sve_reglist (const aarch64_operand *self,
2218 			 aarch64_opnd_info *info, aarch64_insn code,
2219 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2220 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2221 {
2222   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2223   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2224   info->reglist.stride = 1;
2225   return true;
2226 }
2227 
2228 /* Decode {Zn.<T> , Zm.<T>}.  The fields array specifies which field
2229    to use for Zn.  The opcode-dependent value specifies the number
2230    of registers in the list.  */
2231 bool
2232 aarch64_ext_sve_reglist_zt (const aarch64_operand *self,
2233 			    aarch64_opnd_info *info, aarch64_insn code,
2234 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
2235 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2236 {
2237   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2238   info->reglist.num_regs = get_operand_specific_data (self);
2239   info->reglist.stride = 1;
2240   return true;
2241 }
2242 
2243 /* Decode a strided register list.  The first field holds the top bit
2244    (0 or 16) and the second field holds the lower bits.  The stride is
2245    16 divided by the list length.  */
2246 bool
2247 aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
2248 				 aarch64_opnd_info *info, aarch64_insn code,
2249 				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2250 				 aarch64_operand_error *errors
2251 				   ATTRIBUTE_UNUSED)
2252 {
2253   unsigned int upper = extract_field (self->fields[0], code, 0);
2254   unsigned int lower = extract_field (self->fields[1], code, 0);
2255   info->reglist.first_regno = upper * 16 + lower;
2256   info->reglist.num_regs = get_operand_specific_data (self);
2257   info->reglist.stride = 16 / info->reglist.num_regs;
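  /* Illustrative example: upper == 1 and lower == 2 with a four-register
     list give first_regno 18 and stride 4, i.e. { Z18, Z22, Z26, Z30 }.  */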
2258   return true;
2259 }
2260 
2261 /* Decode <pattern>{, MUL #<amount>}.  The fields array specifies which
2262    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
2263    field.  */
2264 bool
2265 aarch64_ext_sve_scale (const aarch64_operand *self,
2266 		       aarch64_opnd_info *info, aarch64_insn code,
2267 		       const aarch64_inst *inst, aarch64_operand_error *errors)
2268 {
2269   int val;
2270 
2271   if (!aarch64_ext_imm (self, info, code, inst, errors))
2272     return false;
2273   val = extract_field (FLD_SVE_imm4, code, 0);
2274   info->shifter.kind = AARCH64_MOD_MUL;
2275   info->shifter.amount = val + 1;
2276   info->shifter.operator_present = (val != 0);
2277   info->shifter.amount_present = (val != 0);
2278   return true;
2279 }
2280 
2281 /* Return the top set bit in VALUE, which is expected to be relatively
2282    small.  */
2283 static uint64_t
2284 get_top_bit (uint64_t value)
2285 {
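  /* VALUE & -VALUE isolates the lowest set bit; keep clearing the lowest
     set bit until only the highest one remains, e.g. 0b1010 -> 0b1000.  */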
2286   while ((value & -value) != value)
2287     value -= value & -value;
2288   return value;
2289 }
2290 
2291 /* Decode an SVE shift-left immediate.  */
2292 bool
2293 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2294 			aarch64_opnd_info *info, const aarch64_insn code,
2295 			const aarch64_inst *inst, aarch64_operand_error *errors)
2296 {
2297   if (!aarch64_ext_imm (self, info, code, inst, errors)
2298       || info->imm.value == 0)
2299     return false;
2300 
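  /* The top set bit of the raw value encodes the element size; subtracting
     it leaves the shift amount, e.g. 0b1011 -> 11 - 8 = 3.  */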
2301   info->imm.value -= get_top_bit (info->imm.value);
2302   return true;
2303 }
2304 
2305 /* Decode an SVE shift-right immediate.  */
2306 bool
2307 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2308 			aarch64_opnd_info *info, const aarch64_insn code,
2309 			const aarch64_inst *inst, aarch64_operand_error *errors)
2310 {
2311   if (!aarch64_ext_imm (self, info, code, inst, errors)
2312       || info->imm.value == 0)
2313     return false;
2314 
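  /* The shift amount is twice the top set bit minus the raw value,
     e.g. 0b1101 -> 2 * 8 - 13 = 3.  */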
2315   info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2316   return true;
2317 }
2318 
2319 /* Decode X0-X30.  Register 31 is unallocated.  */
2320 bool
2321 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2322 		       const aarch64_insn code,
2323 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
2324 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2325 {
2326   info->reg.regno = extract_field (self->fields[0], code, 0);
2327   return info->reg.regno <= 30;
2328 }
2329 
2330 /* Decode an indexed register, with the first field being the register
2331    number and the remaining fields being the index.  */
2332 bool
2333 aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
2334 			  const aarch64_insn code,
2335 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
2336 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2337 {
2338   int bias = get_operand_specific_data (self);
2339   info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
2340   info->reglane.index = extract_all_fields_after (self, 1, code);
2341   return true;
2342 }
2343 
2344 /* Decode a plain shift-right immediate, when there is only a single
2345    element size.  */
2346 bool
2347 aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
2348 			  const aarch64_insn code,
2349 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
2350 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2351 {
2352   unsigned int base = 1 << get_operand_field_width (self, 0);
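  /* The field holds BASE minus the shift amount, so decode by subtracting
     it from BASE, e.g. a 3-bit field (BASE == 8) holding 5 means a shift
     of 3.  */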
2353   info->imm.value = base - extract_field (self->fields[0], code, 0);
2354   return true;
2355 }
2356 
2357 /* Bitfields that are commonly used to encode certain operands' information
2358    may be partially used as part of the base opcode in some instructions.
2359    For example, the bit 1 of the field 'size' in
2360      FCVTXN <Vb><d>, <Va><n>
2361    is actually part of the base opcode, while only size<0> is available
2362    for encoding the register type.  Another example is the AdvSIMD
2363    instruction ORR (register), in which the field 'size' is also used for
2364    the base opcode, leaving only the field 'Q' available to encode the
2365    vector register arrangement specifier '8B' or '16B'.
2366 
2367    This function tries to deduce the qualifier from the value of partially
2368    constrained field(s).  Given the VALUE of such a field or fields, the
2369    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2370    operand encoding), the function returns the matching qualifier or
2371    AARCH64_OPND_QLF_NIL if nothing matches.
2372 
2373    N.B. CANDIDATES is a group of possible qualifiers that are valid for
2374    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2375    may end with AARCH64_OPND_QLF_NIL.  */
2376 
2377 static enum aarch64_opnd_qualifier
2378 get_qualifier_from_partial_encoding (aarch64_insn value,
2379 				     const enum aarch64_opnd_qualifier* \
2380 				     candidates,
2381 				     aarch64_insn mask)
2382 {
2383   int i;
2384   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2385   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2386     {
2387       aarch64_insn standard_value;
2388       if (candidates[i] == AARCH64_OPND_QLF_NIL)
2389 	break;
2390       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2391       if ((standard_value & mask) == (value & mask))
2392 	return candidates[i];
2393     }
2394   return AARCH64_OPND_QLF_NIL;
2395 }
2396 
2397 /* Given a list of qualifier sequences, return all possible valid qualifiers
2398    for operand IDX in QUALIFIERS.
2399    Assume QUALIFIERS is an array whose length is large enough.  */
2400 
2401 static void
2402 get_operand_possible_qualifiers (int idx,
2403 				 const aarch64_opnd_qualifier_seq_t *list,
2404 				 enum aarch64_opnd_qualifier *qualifiers)
2405 {
2406   int i;
2407   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2408     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2409       break;
2410 }
2411 
2412 /* Decode the size:Q fields for e.g. SHADD.
2413    We tag one operand with the qualifier according to the code;
2414    whether that qualifier is valid for this opcode is left to the
2415    semantic checking.  */
2416 
2417 static int
2418 decode_sizeq (aarch64_inst *inst)
2419 {
2420   int idx;
2421   enum aarch64_opnd_qualifier qualifier;
2422   aarch64_insn code;
2423   aarch64_insn value, mask;
2424   enum aarch64_field_kind fld_sz;
2425   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2426 
2427   if (inst->opcode->iclass == asisdlse
2428      || inst->opcode->iclass == asisdlsep
2429      || inst->opcode->iclass == asisdlso
2430      || inst->opcode->iclass == asisdlsop)
2431     fld_sz = FLD_vldst_size;
2432   else
2433     fld_sz = FLD_size;
2434 
2435   code = inst->value;
2436   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2437   /* Work out which bits of the Q and size fields are actually available
2438      for operand encoding.  Opcodes like FMAXNM and FMLA have size[1]
2439      unavailable.  */
2440   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2441 
2442   /* The index of the operand to tag with a qualifier, and the qualifier
2443      itself, are deduced from the value of the size and Q fields and the
2444      lists of valid qualifiers.  */
2445   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2446   DEBUG_TRACE ("key idx: %d", idx);
2447 
2448   /* For most of the related instructions, size:Q is fully available for
2449      operand encoding.  */
2450   if (mask == 0x7)
2451     {
2452       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2453       return 1;
2454     }
2455 
2456   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2457 				   candidates);
2458 #ifdef DEBUG_AARCH64
2459   if (debug_dump)
2460     {
2461       int i;
2462       for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2463 	   && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2464 	DEBUG_TRACE ("qualifier %d: %s", i,
2465 		     aarch64_get_qualifier_name(candidates[i]));
2466       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2467     }
2468 #endif /* DEBUG_AARCH64 */
2469 
2470   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2471 
2472   if (qualifier == AARCH64_OPND_QLF_NIL)
2473     return 0;
2474 
2475   inst->operands[idx].qualifier = qualifier;
2476   return 1;
2477 }
2478 
2479 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2480      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2481 
2482 static int
2483 decode_asimd_fcvt (aarch64_inst *inst)
2484 {
2485   aarch64_field field = {0, 0};
2486   aarch64_insn value;
2487   enum aarch64_opnd_qualifier qualifier;
2488 
2489   gen_sub_field (FLD_size, 0, 1, &field);
2490   value = extract_field_2 (&field, inst->value, 0);
2491   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2492     : AARCH64_OPND_QLF_V_2D;
2493   switch (inst->opcode->op)
2494     {
2495     case OP_FCVTN:
2496     case OP_FCVTN2:
2497       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2498       inst->operands[1].qualifier = qualifier;
2499       break;
2500     case OP_FCVTL:
2501     case OP_FCVTL2:
2502       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
2503       inst->operands[0].qualifier = qualifier;
2504       break;
2505     default:
2506       return 0;
2507     }
2508 
2509   return 1;
2510 }
2511 
2512 /* Decode size[0], i.e. bit 22, for
2513      e.g. FCVTXN <Vb><d>, <Va><n>.  */
2514 
2515 static int
2516 decode_asisd_fcvtxn (aarch64_inst *inst)
2517 {
2518   aarch64_field field = {0, 0};
2519   gen_sub_field (FLD_size, 0, 1, &field);
2520   if (!extract_field_2 (&field, inst->value, 0))
2521     return 0;
2522   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2523   return 1;
2524 }
2525 
2526 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
2527 static int
2528 decode_fcvt (aarch64_inst *inst)
2529 {
2530   enum aarch64_opnd_qualifier qualifier;
2531   aarch64_insn value;
2532   const aarch64_field field = {15, 2};
2533 
2534   /* opc dstsize */
2535   value = extract_field_2 (&field, inst->value, 0);
2536   switch (value)
2537     {
2538     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2539     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2540     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2541     default: return 0;
2542     }
2543   inst->operands[0].qualifier = qualifier;
2544 
2545   return 1;
2546 }
2547 
2548 /* Do miscellaneous decodings that are not common enough to be driven by
2549    flags.  */
2550 
2551 static int
2552 do_misc_decoding (aarch64_inst *inst)
2553 {
2554   unsigned int value;
2555   switch (inst->opcode->op)
2556     {
2557     case OP_FCVT:
2558       return decode_fcvt (inst);
2559 
2560     case OP_FCVTN:
2561     case OP_FCVTN2:
2562     case OP_FCVTL:
2563     case OP_FCVTL2:
2564       return decode_asimd_fcvt (inst);
2565 
2566     case OP_FCVTXN_S:
2567       return decode_asisd_fcvtxn (inst);
2568 
2569     case OP_MOV_P_P:
2570     case OP_MOVS_P_P:
2571       value = extract_field (FLD_SVE_Pn, inst->value, 0);
2572       return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2573 	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2574 
2575     case OP_MOV_Z_P_Z:
2576       return (extract_field (FLD_SVE_Zd, inst->value, 0)
2577 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2578 
2579     case OP_MOV_Z_V:
2580       /* Index must be zero.  */
2581       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2582       return value > 0 && value <= 16 && value == (value & -value);
2583 
2584     case OP_MOV_Z_Z:
2585       return (extract_field (FLD_SVE_Zn, inst->value, 0)
2586 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2587 
2588     case OP_MOV_Z_Zi:
2589       /* Index must be nonzero.  */
2590       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2591       return value > 0 && value != (value & -value);
2592 
2593     case OP_MOVM_P_P_P:
2594       return (extract_field (FLD_SVE_Pd, inst->value, 0)
2595 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2596 
2597     case OP_MOVZS_P_P_P:
2598     case OP_MOVZ_P_P_P:
2599       return (extract_field (FLD_SVE_Pn, inst->value, 0)
2600 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2601 
2602     case OP_NOTS_P_P_P_Z:
2603     case OP_NOT_P_P_P_Z:
2604       return (extract_field (FLD_SVE_Pm, inst->value, 0)
2605 	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2606 
2607     default:
2608       return 0;
2609     }
2610 }
2611 
2612 /* Opcodes that have fields shared by multiple operands are usually flagged
2613    with flags.  In this function, we detect such flags, decode the related
2614    field(s) and store the information in one of the related operands.  The
2615    'one' operand is not just any operand, but one that can accommodate all
2616    the information that has been decoded.  */
2617 
2618 static int
2619 do_special_decoding (aarch64_inst *inst)
2620 {
2621   int idx;
2622   aarch64_insn value;
2623   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
2624   if (inst->opcode->flags & F_COND)
2625     {
2626       value = extract_field (FLD_cond2, inst->value, 0);
2627       inst->cond = get_cond_from_value (value);
2628     }
2629   /* 'sf' field.  */
2630   if (inst->opcode->flags & F_SF)
2631     {
2632       idx = select_operand_for_sf_field_coding (inst->opcode);
2633       value = extract_field (FLD_sf, inst->value, 0);
2634       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2635       if ((inst->opcode->flags & F_N)
2636 	  && extract_field (FLD_N, inst->value, 0) != value)
2637 	return 0;
2638     }
2639   /* 'lse_sz' field.  */
2640   if (inst->opcode->flags & F_LSE_SZ)
2641     {
2642       idx = select_operand_for_sf_field_coding (inst->opcode);
2643       value = extract_field (FLD_lse_sz, inst->value, 0);
2644       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2645     }
2646   /* rcpc3 'size' field.  */
2647   if (inst->opcode->flags & F_RCPC3_SIZE)
2648     {
2649       value = extract_field (FLD_rcpc3_size, inst->value, 0);
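      /* Apply the size to each integer or FP register operand that precedes
         the address operand.  */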
2650       for (int i = 0;
2651 	   aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2652 	   i++)
2653 	{
2654 	  if (aarch64_operands[inst->operands[i].type].op_class
2655 	      == AARCH64_OPND_CLASS_INT_REG)
2656 	    inst->operands[i].qualifier = get_greg_qualifier_from_value (value & 1);
2657 	  else if (aarch64_operands[inst->operands[i].type].op_class
2658 	      == AARCH64_OPND_CLASS_FP_REG)
2659 	    {
2660 	      value += (extract_field (FLD_opc1, inst->value, 0) << 2);
2661 	      inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2662 	    }
2663 	}
2664     }
2665 
2666   /* size:Q fields.  */
2667   if (inst->opcode->flags & F_SIZEQ)
2668     return decode_sizeq (inst);
2669 
2670   if (inst->opcode->flags & F_FPTYPE)
2671     {
2672       idx = select_operand_for_fptype_field_coding (inst->opcode);
2673       value = extract_field (FLD_type, inst->value, 0);
2674       switch (value)
2675 	{
2676 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2677 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2678 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2679 	default: return 0;
2680 	}
2681     }
2682 
2683   if (inst->opcode->flags & F_SSIZE)
2684     {
2685       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2686 	 of the base opcode.  */
2687       aarch64_insn mask;
2688       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2689       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2690       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2691       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2692       /* For most related instructions, the 'size' field is fully available
2693 	 for operand encoding.  */
2694       if (mask == 0x3)
2695 	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2696       else
2697 	{
2698 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2699 					   candidates);
2700 	  inst->operands[idx].qualifier
2701 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
2702 	}
2703     }
2704 
2705   if (inst->opcode->flags & F_T)
2706     {
2707       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
2708       int num = 0;
2709       unsigned val, Q;
2710       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2711 	      == AARCH64_OPND_CLASS_SIMD_REG);
2712       /* imm5<3:0>	q	<t>
2713 	 0000		x	reserved
2714 	 xxx1		0	8b
2715 	 xxx1		1	16b
2716 	 xx10		0	4h
2717 	 xx10		1	8h
2718 	 x100		0	2s
2719 	 x100		1	4s
2720 	 1000		0	reserved
2721 	 1000		1	2d  */
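      /* For example, imm5 = 0b00100 with Q = 1 has two trailing zeros, so NUM
	 below becomes 2 and (NUM << 1) | Q selects the 4S qualifier, matching
	 the x100/1 row of the table above.  */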
2722       val = extract_field (FLD_imm5, inst->value, 0);
2723       while ((val & 0x1) == 0 && ++num <= 3)
2724 	val >>= 1;
2725       if (num > 3)
2726 	return 0;
2727       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2728       inst->operands[0].qualifier =
2729 	get_vreg_qualifier_from_value ((num << 1) | Q);
2730     }
2731 
2732   if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2733     {
2734       unsigned size;
2735       size = (unsigned) extract_field (FLD_size, inst->value,
2736 				       inst->opcode->mask);
2737       inst->operands[0].qualifier
2738 	= get_vreg_qualifier_from_value (1 + (size << 1));
2739       inst->operands[2].qualifier = get_sreg_qualifier_from_value (size);
2740     }
2741 
2742   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2743     {
2744       /* Use the Rt operand for the encoding in cases such as
2745 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
2746       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2747       if (idx == -1)
2748 	{
2749 	  /* Otherwise use the result operand, which has to be an integer
2750 	     register.  */
2751 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
2752 		  == AARCH64_OPND_CLASS_INT_REG);
2753 	  idx = 0;
2754 	}
2755       assert (idx == 0 || idx == 1);
2756       value = extract_field (FLD_Q, inst->value, 0);
2757       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2758     }
2759 
2760   if (inst->opcode->flags & F_LDS_SIZE)
2761     {
2762       aarch64_field field = {0, 0};
2763       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2764 	      == AARCH64_OPND_CLASS_INT_REG);
2765       gen_sub_field (FLD_opc, 0, 1, &field);
2766       value = extract_field_2 (&field, inst->value, 0);
2767       inst->operands[0].qualifier
2768 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2769     }
2770 
2771   /* Miscellaneous decoding; done as the last step.  */
2772   if (inst->opcode->flags & F_MISC)
2773     return do_misc_decoding (inst);
2774 
2775   return 1;
2776 }
2777 
2778 /* Converters converting a real opcode instruction to its alias form.  */
2779 
2780 /* ROR <Wd>, <Ws>, #<shift>
2781      is equivalent to:
2782    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
2783 static int
2784 convert_extr_to_ror (aarch64_inst *inst)
2785 {
2786   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2787     {
2788       copy_operand_info (inst, 2, 3);
2789       inst->operands[3].type = AARCH64_OPND_NIL;
2790       return 1;
2791     }
2792   return 0;
2793 }
2794 
2795 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2796      is equivalent to:
2797    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2798 static int
2799 convert_shll_to_xtl (aarch64_inst *inst)
2800 {
2801   if (inst->operands[2].imm.value == 0)
2802     {
2803       inst->operands[2].type = AARCH64_OPND_NIL;
2804       return 1;
2805     }
2806   return 0;
2807 }
2808 
2809 /* Convert
2810      UBFM <Xd>, <Xn>, #<shift>, #63.
2811    to
2812      LSR <Xd>, <Xn>, #<shift>.  */
2813 static int
2814 convert_bfm_to_sr (aarch64_inst *inst)
2815 {
2816   int64_t imms, val;
2817 
2818   imms = inst->operands[3].imm.value;
2819   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2820   if (imms == val)
2821     {
2822       inst->operands[3].type = AARCH64_OPND_NIL;
2823       return 1;
2824     }
2825 
2826   return 0;
2827 }
2828 
2829 /* Convert MOV to ORR.  */
2830 static int
2831 convert_orr_to_mov (aarch64_inst *inst)
2832 {
2833   /* MOV <Vd>.<T>, <Vn>.<T>
2834      is equivalent to:
2835      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
2836   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2837     {
2838       inst->operands[2].type = AARCH64_OPND_NIL;
2839       return 1;
2840     }
2841   return 0;
2842 }
2843 
2844 /* When <imms> >= <immr>, the instruction written:
2845      SBFX <Xd>, <Xn>, #<lsb>, #<width>
2846    is equivalent to:
2847      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
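/* For example, SBFM X0, X1, #8, #23 is shown as SBFX X0, X1, #8, #16, since
   <lsb> = 8 and <width> = 23 + 1 - 8 = 16.  */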
2848 
2849 static int
2850 convert_bfm_to_bfx (aarch64_inst *inst)
2851 {
2852   int64_t immr, imms;
2853 
2854   immr = inst->operands[2].imm.value;
2855   imms = inst->operands[3].imm.value;
2856   if (imms >= immr)
2857     {
2858       int64_t lsb = immr;
2859       inst->operands[2].imm.value = lsb;
2860       inst->operands[3].imm.value = imms + 1 - lsb;
2861       /* The two opcodes have different qualifiers for
2862 	 the immediate operands; reset to help the checking.  */
2863       reset_operand_qualifier (inst, 2);
2864       reset_operand_qualifier (inst, 3);
2865       return 1;
2866     }
2867 
2868   return 0;
2869 }
2870 
2871 /* When <imms> < <immr>, the instruction written:
2872      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2873    is equivalent to:
2874      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
2875 
2876 static int
2877 convert_bfm_to_bfi (aarch64_inst *inst)
2878 {
2879   int64_t immr, imms, val;
2880 
2881   immr = inst->operands[2].imm.value;
2882   imms = inst->operands[3].imm.value;
2883   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2884   if (imms < immr)
2885     {
2886       inst->operands[2].imm.value = (val - immr) & (val - 1);
2887       inst->operands[3].imm.value = imms + 1;
2888       /* The two opcodes have different qualifiers for
2889 	 the immediate operands; reset to help the checking.  */
2890       reset_operand_qualifier (inst, 2);
2891       reset_operand_qualifier (inst, 3);
2892       return 1;
2893     }
2894 
2895   return 0;
2896 }
2897 
2898 /* The instruction written:
2899      BFC <Xd>, #<lsb>, #<width>
2900    is equivalent to:
2901      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2902 
2903 static int
2904 convert_bfm_to_bfc (aarch64_inst *inst)
2905 {
2906   int64_t immr, imms, val;
2907 
2908   /* Should have been assured by the base opcode value.  */
2909   assert (inst->operands[1].reg.regno == 0x1f);
2910 
2911   immr = inst->operands[2].imm.value;
2912   imms = inst->operands[3].imm.value;
2913   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2914   if (imms < immr)
2915     {
2916       /* Drop XZR from the second operand.  */
2917       copy_operand_info (inst, 1, 2);
2918       copy_operand_info (inst, 2, 3);
2919       inst->operands[3].type = AARCH64_OPND_NIL;
2920 
2921       /* Recalculate the immediates.  */
2922       inst->operands[1].imm.value = (val - immr) & (val - 1);
2923       inst->operands[2].imm.value = imms + 1;
2924 
2925       /* The two opcodes have different qualifiers for the operands; reset to
2926 	 help the checking.  */
2927       reset_operand_qualifier (inst, 1);
2928       reset_operand_qualifier (inst, 2);
2929       reset_operand_qualifier (inst, 3);
2930 
2931       return 1;
2932     }
2933 
2934   return 0;
2935 }
2936 
2937 /* The instruction written:
2938      LSL <Xd>, <Xn>, #<shift>
2939    is equivalent to:
2940      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
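/* For example, UBFM X0, X1, #60, #59 is shown as LSL X0, X1, #4, since
   immr = (64 - 4) & 0x3f = 60 and imms = 63 - 4 = 59.  */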
2941 
2942 static int
2943 convert_ubfm_to_lsl (aarch64_inst *inst)
2944 {
2945   int64_t immr = inst->operands[2].imm.value;
2946   int64_t imms = inst->operands[3].imm.value;
2947   int64_t val
2948     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2949 
2950   if ((immr == 0 && imms == val) || immr == imms + 1)
2951     {
2952       inst->operands[3].type = AARCH64_OPND_NIL;
2953       inst->operands[2].imm.value = val - imms;
2954       return 1;
2955     }
2956 
2957   return 0;
2958 }
2959 
2960 /* CINC <Wd>, <Wn>, <cond>
2961      is equivalent to:
2962    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2963      where <cond> is not AL or NV.  */
2964 
2965 static int
2966 convert_from_csel (aarch64_inst *inst)
2967 {
2968   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2969       && (inst->operands[3].cond->value & 0xe) != 0xe)
2970     {
2971       copy_operand_info (inst, 2, 3);
2972       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2973       inst->operands[3].type = AARCH64_OPND_NIL;
2974       return 1;
2975     }
2976   return 0;
2977 }
2978 
2979 /* CSET <Wd>, <cond>
2980      is equivalent to:
2981    CSINC <Wd>, WZR, WZR, invert(<cond>)
2982      where <cond> is not AL or NV.  */
2983 
2984 static int
2985 convert_csinc_to_cset (aarch64_inst *inst)
2986 {
2987   if (inst->operands[1].reg.regno == 0x1f
2988       && inst->operands[2].reg.regno == 0x1f
2989       && (inst->operands[3].cond->value & 0xe) != 0xe)
2990     {
2991       copy_operand_info (inst, 1, 3);
2992       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2993       inst->operands[3].type = AARCH64_OPND_NIL;
2994       inst->operands[2].type = AARCH64_OPND_NIL;
2995       return 1;
2996     }
2997   return 0;
2998 }
2999 
3000 /* MOV <Wd>, #<imm>
3001      is equivalent to:
3002    MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
3003 
3004    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3005    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3006    or where a MOVN has an immediate that could be encoded by MOVZ, or where
3007    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3008    machine-instruction mnemonic must be used.  */
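/* For example, MOVZ W0, #0x12, LSL #16 is shown as MOV W0, #0x120000, whereas
   MOVN W0, #0x0, LSL #16 keeps its machine mnemonic because its immediate is
   zero with a non-zero shift.  */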
3009 
3010 static int
3011 convert_movewide_to_mov (aarch64_inst *inst)
3012 {
3013   uint64_t value = inst->operands[1].imm.value;
3014   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
3015   if (value == 0 && inst->operands[1].shifter.amount != 0)
3016     return 0;
3017   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3018   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
3019   value <<= inst->operands[1].shifter.amount;
3020   /* As this is an alias converter, INST->OPCODE must be the opcode of the
3021      real instruction.  */
3022   if (inst->opcode->op == OP_MOVN)
3023     {
3024       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3025       value = ~value;
3026       /* A MOVN has an immediate that could be encoded by MOVZ.  */
3027       if (aarch64_wide_constant_p (value, is32, NULL))
3028 	return 0;
3029     }
3030   inst->operands[1].imm.value = value;
3031   inst->operands[1].shifter.amount = 0;
3032   return 1;
3033 }
3034 
3035 /* MOV <Wd>, #<imm>
3036      is equivalent to:
3037    ORR <Wd>, WZR, #<imm>.
3038 
3039    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3040    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3041    or where a MOVN has an immediate that could be encoded by MOVZ, or where
3042    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3043    machine-instruction mnemonic must be used.  */
3044 
3045 static int
3046 convert_movebitmask_to_mov (aarch64_inst *inst)
3047 {
3048   int is32;
3049   uint64_t value;
3050 
3051   /* Should have been assured by the base opcode value.  */
3052   assert (inst->operands[1].reg.regno == 0x1f);
3053   copy_operand_info (inst, 1, 2);
3054   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3055   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3056   value = inst->operands[1].imm.value;
3057   /* ORR has an immediate that could be generated by a MOVZ or MOVN
3058      instruction.  */
3059   if (inst->operands[0].reg.regno != 0x1f
3060       && (aarch64_wide_constant_p (value, is32, NULL)
3061 	  || aarch64_wide_constant_p (~value, is32, NULL)))
3062     return 0;
3063 
3064   inst->operands[2].type = AARCH64_OPND_NIL;
3065   return 1;
3066 }
3067 
3068 /* Some alias opcodes are disassembled by being converted from their real form.
3069    N.B. INST->OPCODE is the real opcode rather than the alias.  */
3070 
3071 static int
3072 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
3073 {
3074   switch (alias->op)
3075     {
3076     case OP_ASR_IMM:
3077     case OP_LSR_IMM:
3078       return convert_bfm_to_sr (inst);
3079     case OP_LSL_IMM:
3080       return convert_ubfm_to_lsl (inst);
3081     case OP_CINC:
3082     case OP_CINV:
3083     case OP_CNEG:
3084       return convert_from_csel (inst);
3085     case OP_CSET:
3086     case OP_CSETM:
3087       return convert_csinc_to_cset (inst);
3088     case OP_UBFX:
3089     case OP_BFXIL:
3090     case OP_SBFX:
3091       return convert_bfm_to_bfx (inst);
3092     case OP_SBFIZ:
3093     case OP_BFI:
3094     case OP_UBFIZ:
3095       return convert_bfm_to_bfi (inst);
3096     case OP_BFC:
3097       return convert_bfm_to_bfc (inst);
3098     case OP_MOV_V:
3099       return convert_orr_to_mov (inst);
3100     case OP_MOV_IMM_WIDE:
3101     case OP_MOV_IMM_WIDEN:
3102       return convert_movewide_to_mov (inst);
3103     case OP_MOV_IMM_LOG:
3104       return convert_movebitmask_to_mov (inst);
3105     case OP_ROR_IMM:
3106       return convert_extr_to_ror (inst);
3107     case OP_SXTL:
3108     case OP_SXTL2:
3109     case OP_UXTL:
3110     case OP_UXTL2:
3111       return convert_shll_to_xtl (inst);
3112     default:
3113       return 0;
3114     }
3115 }
3116 
3117 static bool
3118 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
3119 		       aarch64_inst *, int, aarch64_operand_error *errors);
3120 
3121 /* Given the instruction information in *INST, check if the instruction has
3122    any alias form that can be used to represent *INST.  If the answer is yes,
3123    update *INST to be in the form of the determined alias.  */
3124 
3125 /* In the opcode description table, the following flags are used in opcode
3126    entries to help establish the relations between the real and alias opcodes:
3127 
3128 	F_ALIAS:	opcode is an alias
3129 	F_HAS_ALIAS:	opcode has alias(es)
3130 	F_P1
3131 	F_P2
3132 	F_P3:		Disassembly preference priority 1-3 (the larger the
3133 			number, the higher the priority).  If nothing is
3134 			specified, the priority is 0 by default, i.e. the lowest.
3135 
3136    Although the relation between the machine and the alias instructions is not
3137    explicitly described, it can be easily determined from the base opcode
3138    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
3139    description entries:
3140 
3141    The mask of an alias opcode must be equal to or a super-set (i.e. more
3142    constrained) of that of the aliased opcode; so is the base opcode value.
3143 
3144    if (opcode_has_alias (real) && alias_opcode_p (opcode)
3145        && (opcode->mask & real->mask) == real->mask
3146        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
3147    then OPCODE is an alias of, and only of, the REAL instruction
3148 
3149    The alias relationship is kept flat to keep the related algorithms simple;
3150    an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
3151 
3152    During disassembly, the decoding decision tree (in
3153    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
3154    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
3155    not specified), the disassembler will check whether any alias instruction
3156    exists for this real instruction.  If there is, the disassembler will try
3157    to disassemble the 32-bit binary again using the alias's rule, or try to
3158    convert the IR to the form of the alias.  In the case of multiple aliases,
3159    the aliases are tried one by one from the highest priority (currently the
3160    flag F_P3) to the lowest priority (no priority flag), and the first that
3161    succeeds is adopted.
3162 
3163    You may ask why there is a need to convert the IR from one form to
3164    another when handling certain aliases.  On one hand, it avoids adding
3165    more operand code to handle unusual encoding/decoding; on the other
3166    hand, during disassembly, the conversion is an effective way to check
3167    the conditions of an alias (as an alias may be adopted only if certain
3168    conditions are met).
3169 
3170    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
3171    aarch64_opcode_table and generated aarch64_find_alias_opcode and
3172    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
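/* As a concrete example of the IR conversion described above: when the
   decision tree returns the EXTR entry and the decoded Rn and Rm registers
   are equal, the ROR (immediate) alias is tried; convert_extr_to_ror then
   succeeds and the instruction is shown as ROR rather than EXTR.  */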
3173 
3174 static void
3175 determine_disassembling_preference (struct aarch64_inst *inst,
3176 				    aarch64_operand_error *errors)
3177 {
3178   const aarch64_opcode *opcode;
3179   const aarch64_opcode *alias;
3180 
3181   opcode = inst->opcode;
3182 
3183   /* This opcode does not have an alias, so use itself.  */
3184   if (!opcode_has_alias (opcode))
3185     return;
3186 
3187   alias = aarch64_find_alias_opcode (opcode);
3188   assert (alias);
3189 
3190 #ifdef DEBUG_AARCH64
3191   if (debug_dump)
3192     {
3193       const aarch64_opcode *tmp = alias;
3194       printf ("####   LIST    ordered: ");
3195       while (tmp)
3196 	{
3197 	  printf ("%s, ", tmp->name);
3198 	  tmp = aarch64_find_next_alias_opcode (tmp);
3199 	}
3200       printf ("\n");
3201     }
3202 #endif /* DEBUG_AARCH64 */
3203 
3204   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
3205     {
3206       DEBUG_TRACE ("try %s", alias->name);
3207       assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
3208 
3209       /* An alias can be a pseudo opcode which will never be used in the
3210 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
3211 	 aliasing AND.  */
3212       if (pseudo_opcode_p (alias))
3213 	{
3214 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
3215 	  continue;
3216 	}
3217 
3218       if ((inst->value & alias->mask) != alias->opcode)
3219 	{
3220 	  DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3221 	  continue;
3222 	}
3223 
3224       if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3225 	{
3226 	  DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3227 	  continue;
3228 	}
3229 
3230       /* No need to do any complicated transformation on operands, if the alias
3231 	 opcode does not have any operand.  */
3232       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3233 	{
3234 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3235 	  aarch64_replace_opcode (inst, alias);
3236 	  return;
3237 	}
3238       if (alias->flags & F_CONV)
3239 	{
3240 	  aarch64_inst copy;
3241 	  memcpy (&copy, inst, sizeof (aarch64_inst));
3242 	  /* ALIAS is the preference as long as the instruction can be
3243 	     successfully converted to the form of ALIAS.  */
3244 	  if (convert_to_alias (&copy, alias) == 1)
3245 	    {
3246 	      aarch64_replace_opcode (&copy, alias);
3247 	      if (aarch64_match_operands_constraint (&copy, NULL) != 1)
3248 		{
3249 		  DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3250 		}
3251 	      else
3252 		{
3253 		  DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3254 		  memcpy (inst, &copy, sizeof (aarch64_inst));
3255 		}
3256 	      return;
3257 	    }
3258 	}
3259       else
3260 	{
3261 	  /* Directly decode the alias opcode.  */
3262 	  aarch64_inst temp;
3263 	  memset (&temp, '\0', sizeof (aarch64_inst));
3264 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3265 	    {
3266 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3267 	      memcpy (inst, &temp, sizeof (aarch64_inst));
3268 	      return;
3269 	    }
3270 	}
3271     }
3272 }
3273 
3274 /* Some instructions (including all SVE ones) use the instruction class
3275    to describe how a qualifiers_list index is represented in the instruction
3276    encoding.  If INST is such an instruction, decode the appropriate fields
3277    and fill in the operand qualifiers accordingly.  Return true if no
3278    problems are found.  */
3279 
3280 static bool
3281 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
3282 {
3283   int i, variant;
3284 
3285   variant = 0;
3286   switch (inst->opcode->iclass)
3287     {
3288     case sme_mov:
3289       variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
3290       if (variant >= 4 && variant < 7)
3291 	return false;
3292       if (variant == 7)
3293 	variant = 4;
3294       break;
3295 
3296     case sme_psel:
3297       i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
3298       if (i == 0)
3299 	return false;
3300       while ((i & 1) == 0)
3301 	{
3302 	  i >>= 1;
3303 	  variant += 1;
3304 	}
3305       break;
3306 
3307     case sme_shift:
3308       i = extract_field (FLD_SVE_tszh, inst->value, 0);
3309       goto sve_shift;
3310 
3311     case sme_size_12_bhs:
3312       variant = extract_field (FLD_SME_size_12, inst->value, 0);
3313       if (variant >= 3)
3314 	return false;
3315       break;
3316 
3317     case sme_size_12_hs:
3318       variant = extract_field (FLD_SME_size_12, inst->value, 0);
3319       if (variant != 1 && variant != 2)
3320 	return false;
3321       variant -= 1;
3322       break;
3323 
3324     case sme_size_22:
3325       variant = extract_field (FLD_SME_size_22, inst->value, 0);
3326       break;
3327 
3328     case sme_size_22_hsd:
3329       variant = extract_field (FLD_SME_size_22, inst->value, 0);
3330       if (variant < 1)
3331 	return false;
3332       variant -= 1;
3333       break;
3334 
3335     case sme_sz_23:
3336       variant = extract_field (FLD_SME_sz_23, inst->value, 0);
3337       break;
3338 
3339     case sve_cpy:
3340       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
3341       break;
3342 
3343     case sve_index:
3344       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
3345       if ((i & 31) == 0)
3346 	return false;
3347       while ((i & 1) == 0)
3348 	{
3349 	  i >>= 1;
3350 	  variant += 1;
3351 	}
3352       break;
3353 
3354     case sve_index1:
3355       i = extract_fields (inst->value, 0, 2, FLD_SVE_tsz, FLD_SVE_i2h);
3356       if ((i & 15) == 0)
3357 	return false;
3358       while ((i & 1) == 0)
3359 	{
3360 	  i >>= 1;
3361 	  variant += 1;
3362 	}
3363       break;
3364 
3365     case sve_limm:
3366       /* Pick the smallest applicable element size.  */
3367       if ((inst->value & 0x20600) == 0x600)
3368 	variant = 0;
3369       else if ((inst->value & 0x20400) == 0x400)
3370 	variant = 1;
3371       else if ((inst->value & 0x20000) == 0)
3372 	variant = 2;
3373       else
3374 	variant = 3;
3375       break;
3376 
3377     case sme2_mov:
3378       /* .D is preferred over the other sizes in disassembly.  */
3379       variant = 3;
3380       break;
3381 
3382     case sme2_movaz:
3383     case sme_misc:
3384     case sve_misc:
3385       /* These instructions have only a single variant.  */
3386       break;
3387 
3388     case sve_movprfx:
3389       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
3390       break;
3391 
3392     case sve_pred_zm:
3393       variant = extract_field (FLD_SVE_M_4, inst->value, 0);
3394       break;
3395 
3396     case sve_shift_pred:
3397       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
3398     sve_shift:
3399       if (i == 0)
3400 	return false;
3401       while (i != 1)
3402 	{
3403 	  i >>= 1;
3404 	  variant += 1;
3405 	}
3406       break;
3407 
3408     case sve_shift_unpred:
3409       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3410       goto sve_shift;
3411 
3412     case sve_size_bhs:
3413       variant = extract_field (FLD_size, inst->value, 0);
3414       if (variant >= 3)
3415 	return false;
3416       break;
3417 
3418     case sve_size_bhsd:
3419       variant = extract_field (FLD_size, inst->value, 0);
3420       break;
3421 
3422     case sve_size_hsd:
3423       i = extract_field (FLD_size, inst->value, 0);
3424       if (i < 1)
3425 	return false;
3426       variant = i - 1;
3427       break;
3428 
3429     case sme_fp_sd:
3430     case sme_int_sd:
3431     case sve_size_bh:
3432     case sve_size_sd:
3433       variant = extract_field (FLD_SVE_sz, inst->value, 0);
3434       break;
3435 
3436     case sve_size_sd2:
3437       variant = extract_field (FLD_SVE_sz2, inst->value, 0);
3438       break;
3439 
3440     case sve_size_hsd2:
3441       i = extract_field (FLD_SVE_size, inst->value, 0);
3442       if (i < 1)
3443 	return false;
3444       variant = i - 1;
3445       break;
3446 
3447     case sve_size_13:
3448       /* Ignore low bit of this field since that is set in the opcode for
3449 	 instructions of this iclass.  */
3450       i = (extract_field (FLD_size, inst->value, 0) & 2);
3451       variant = (i >> 1);
3452       break;
3453 
3454     case sve_shift_tsz_bhsd:
3455       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3456       if (i == 0)
3457 	return false;
3458       while (i != 1)
3459 	{
3460 	  i >>= 1;
3461 	  variant += 1;
3462 	}
3463       break;
3464 
3465     case sve_size_tsz_bhs:
3466       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3467       if (i == 0)
3468 	return false;
3469       while (i != 1)
3470 	{
3471 	  if (i & 1)
3472 	    return false;
3473 	  i >>= 1;
3474 	  variant += 1;
3475 	}
3476       break;
3477 
3478     case sve_shift_tsz_hsd:
3479       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3480       if (i == 0)
3481 	return false;
3482       while (i != 1)
3483 	{
3484 	  i >>= 1;
3485 	  variant += 1;
3486 	}
3487       break;
3488 
3489     default:
3490       /* No mapping between instruction class and qualifiers.  */
3491       return true;
3492     }
3493 
3494   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3495     inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
3496   return true;
3497 }
3498 /* Decode the CODE according to OPCODE; fill INST.  Return FALSE if the
3499    decoding fails, which means that CODE is not an instruction of OPCODE;
3500    otherwise return TRUE.
3501 
3502    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3503    determined and used to disassemble CODE; this is done just before the
3504    return.  */
3505 
3506 static bool
3507 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
3508 		       aarch64_inst *inst, int noaliases_p,
3509 		       aarch64_operand_error *errors)
3510 {
3511   int i;
3512 
3513   DEBUG_TRACE ("enter with %s", opcode->name);
3514 
3515   assert (opcode && inst);
3516 
3517   /* Clear inst.  */
3518   memset (inst, '\0', sizeof (aarch64_inst));
3519 
3520   /* Check the base opcode.  */
3521   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
3522     {
3523       DEBUG_TRACE ("base opcode match FAIL");
3524       goto decode_fail;
3525     }
3526 
3527   inst->opcode = opcode;
3528   inst->value = code;
3529 
3530   /* Assign operand codes and indexes.  */
3531   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3532     {
3533       if (opcode->operands[i] == AARCH64_OPND_NIL)
3534 	break;
3535       inst->operands[i].type = opcode->operands[i];
3536       inst->operands[i].idx = i;
3537     }
3538 
3539   /* Call the opcode decoder indicated by flags.  */
3540   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
3541     {
3542       DEBUG_TRACE ("opcode flag-based decoder FAIL");
3543       goto decode_fail;
3544     }
3545 
3546   /* Possibly use the instruction class to determine the correct
3547      qualifier.  */
3548   if (!aarch64_decode_variant_using_iclass (inst))
3549     {
3550       DEBUG_TRACE ("iclass-based decoder FAIL");
3551       goto decode_fail;
3552     }
3553 
3554   /* Call operand decoders.  */
3555   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3556     {
3557       const aarch64_operand *opnd;
3558       enum aarch64_opnd type;
3559 
3560       type = opcode->operands[i];
3561       if (type == AARCH64_OPND_NIL)
3562 	break;
3563       opnd = &aarch64_operands[type];
3564       if (operand_has_extractor (opnd)
3565 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
3566 					 errors)))
3567 	{
3568 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
3569 	  goto decode_fail;
3570 	}
3571     }
3572 
3573   /* If the opcode has a verifier, then check it now.  */
3574   if (opcode->verifier
3575       && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
3576     {
3577       DEBUG_TRACE ("operand verifier FAIL");
3578       goto decode_fail;
3579     }
3580 
3581   /* Match the qualifiers.  */
3582   if (aarch64_match_operands_constraint (inst, NULL) == 1)
3583     {
3584       /* Arriving here, the CODE has been determined as a valid instruction
3585 	 of OPCODE and *INST has been filled with information of this OPCODE
3586 	 instruction.  Before the return, check if the instruction has any
3587 	 alias and should be disassembled in the form of its alias instead.
3588 	 If the answer is yes, *INST will be updated.  */
3589       if (!noaliases_p)
3590 	determine_disassembling_preference (inst, errors);
3591       DEBUG_TRACE ("SUCCESS");
3592       return true;
3593     }
3594   else
3595     {
3596       DEBUG_TRACE ("constraint matching FAIL");
3597     }
3598 
3599  decode_fail:
3600   return false;
3601 }
3602 
3603 /* This does some user-friendly fix-up to *INST.  It currently focuses on
3604    adjusting qualifiers to help the printed instruction be recognized and
3605    understood more easily.  */
3606 
3607 static void
3608 user_friendly_fixup (aarch64_inst *inst)
3609 {
3610   switch (inst->opcode->iclass)
3611     {
3612     case testbranch:
3613       /* TBNZ Xn|Wn, #uimm6, label
3614 	 Test and Branch Not Zero: conditionally jumps to label if bit number
3615 	 uimm6 in register Xn is not zero.  The bit number implies the width of
3616 	 the register, which may be written and should be disassembled as Wn if
3617 	 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3618 	 */
3619       if (inst->operands[1].imm.value < 32)
3620 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3621       break;
3622     default: break;
3623     }
3624 }
3625 
3626 /* Decode INSN and fill in *INST with the instruction information.  An alias
3627    opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on
3628    success.  */
3629 
3630 enum err_type
3631 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3632 		     bool noaliases_p,
3633 		     aarch64_operand_error *errors)
3634 {
3635   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3636 
3637 #ifdef DEBUG_AARCH64
3638   if (debug_dump)
3639     {
3640       const aarch64_opcode *tmp = opcode;
3641       printf ("\n");
3642       DEBUG_TRACE ("opcode lookup:");
3643       while (tmp != NULL)
3644 	{
3645 	  aarch64_verbose ("  %s", tmp->name);
3646 	  tmp = aarch64_find_next_opcode (tmp);
3647 	}
3648     }
3649 #endif /* DEBUG_AARCH64 */
3650 
3651   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3652      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3653      opcode field and value; the difference is that one of them has an extra
3654      field as part of the opcode, while the same field is used for operand
3655      encoding in the other opcode(s) ('immh' in the case of the example).  */
3656   while (opcode != NULL)
3657     {
3658       /* But only one opcode can be decoded successfully, as the
3659 	 decoding routine checks the constraints carefully.  */
3660       if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3661 	return ERR_OK;
3662       opcode = aarch64_find_next_opcode (opcode);
3663     }
3664 
3665   return ERR_UND;
3666 }
3667 
3668 /* Return a short string to indicate a switch to STYLE.  These strings
3669    will be embedded into the disassembled operand text (as produced by
3670    aarch64_print_operand), and then spotted in the print_operands function
3671    so that the disassembler output can be split by style.  */
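/* For example, the marker for style number 3 is the three-byte sequence
   0x02 '3' 0x02 (STYLE_MARKER_CHAR, the hex digit, STYLE_MARKER_CHAR).  */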
3672 
3673 static const char *
3674 get_style_text (enum disassembler_style style)
3675 {
3676   static bool init = false;
3677   static char formats[16][4];
3678   unsigned num;
3679 
3680   /* First time through we build a string for every possible format.  This
3681      code relies on there being no more than 16 different styles (there's
3682      an assert below for this).  */
3683   if (!init)
3684     {
3685       int i;
3686 
3687       for (i = 0; i <= 0xf; ++i)
3688 	{
3689 	  int res ATTRIBUTE_UNUSED
3690 	    = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3691 			STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3692 	  assert (res == 3);
3693 	}
3694 
3695       init = true;
3696     }
3697 
3698   /* Return the string that marks switching to STYLE.  */
3699   num = (unsigned) style;
3700   assert (style <= 0xf);
3701   return formats[num];
3702 }
3703 
3704 /* Callback used by aarch64_print_operand to apply STYLE to the
3705    disassembler output created from FMT and ARGS.  The STYLER object holds
3706    any required state.  Must return a pointer to a string (created from FMT
3707    and ARGS) that will continue to be valid until the complete disassembled
3708    instruction has been printed.
3709 
3710    We return a string that includes two embedded style markers: the first,
3711    placed at the start of the string, indicates a switch to STYLE, and the
3712    second, placed at the end of the string, indicates a switch back to the
3713    default text style.
3714 
3715    Later, when we print the operand text we take care to collapse any
3716    adjacent style markers, and to ignore any style markers that appear at
3717    the very end of a complete operand string.  */
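/* For example, assuming (for illustration only) that dis_style_text is
   encoded as style number 0, formatting "x0" in style 5 places the bytes
   0x02 '5' 0x02 'x' '0' 0x02 '0' 0x02 on the obstack.  */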
3718 
3719 static const char *aarch64_apply_style (struct aarch64_styler *styler,
3720 					enum disassembler_style style,
3721 					const char *fmt,
3722 					va_list args)
3723 {
3724   int res;
3725   char *ptr, *tmp;
3726   struct obstack *stack = (struct obstack *) styler->state;
3727   va_list ap;
3728 
3729   /* These are the two strings for switching styles.  */
3730   const char *style_on = get_style_text (style);
3731   const char *style_off = get_style_text (dis_style_text);
3732 
3733   /* Calculate space needed once FMT and ARGS are expanded.  */
3734   va_copy (ap, args);
3735   res = vsnprintf (NULL, 0, fmt, ap);
3736   va_end (ap);
3737   assert (res >= 0);
3738 
3739   /* Allocate space on the obstack for the expanded FMT and ARGS, as well
3740      as the two strings for switching styles, then write all of these
3741      strings onto the obstack.  */
3742   ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
3743 				+ strlen (style_off) + 1);
3744   tmp = stpcpy (ptr, style_on);
3745   res = vsnprintf (tmp, (res + 1), fmt, args);
3746   assert (res >= 0);
3747   tmp += res;
3748   strcpy (tmp, style_off);
3749 
3750   return ptr;
3751 }
3752 
3753 /* Print operands.  */
3754 
3755 static void
3756 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3757 		const aarch64_opnd_info *opnds, struct disassemble_info *info,
3758 		bool *has_notes)
3759 {
3760   char *notes = NULL;
3761   int i, pcrel_p, num_printed;
3762   struct aarch64_styler styler;
3763   struct obstack content;
3764   obstack_init (&content);
3765 
3766   styler.apply_style = aarch64_apply_style;
3767   styler.state = (void *) &content;
3768 
3769   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3770     {
3771       char str[128];
3772       char cmt[128];
3773 
3774       /* We rely mainly on the opcode's operand info, but we also look into
3775 	 inst->operands to support the disassembly of the optional
3776 	 operand.
3777 	 The two operand codes should be the same in all cases, apart from
3778 	 when the operand can be optional.  */
3779       if (opcode->operands[i] == AARCH64_OPND_NIL
3780 	  || opnds[i].type == AARCH64_OPND_NIL)
3781 	break;
3782 
3783       /* Generate the operand string in STR.  */
3784       aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3785 			     &info->target, &notes, cmt, sizeof (cmt),
3786 			     arch_variant, &styler);
3787 
3788       /* Print the delimiter (taking account of omitted operand(s)).  */
3789       if (str[0] != '\0')
3790 	(*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
3791 				      num_printed++ == 0 ? "\t" : ", ");
3792 
3793       /* Print the operand.  */
3794       if (pcrel_p)
3795 	(*info->print_address_func) (info->target, info);
3796       else
3797 	{
3798 	  /* This operand came from aarch64_print_operand, and will include
3799 	     embedded strings indicating which style each character should
3800 	     have.  In the following code we split the text based on
3801 	     CURR_STYLE, and call the styled print callback to print each
3802 	     block of text in the appropriate style.  */
3803 	  char *start, *curr;
3804 	  enum disassembler_style curr_style = dis_style_text;
3805 
3806 	  start = curr = str;
3807 	  do
3808 	    {
3809 	      if (*curr == '\0'
3810 		  || (*curr == STYLE_MARKER_CHAR
3811 		      && ISXDIGIT (*(curr + 1))
3812 		      && *(curr + 2) == STYLE_MARKER_CHAR))
3813 		{
3814 		  /* Output content between our START position and CURR.  */
3815 		  int len = curr - start;
3816 		  if (len > 0)
3817 		    {
3818 		      if ((*info->fprintf_styled_func) (info->stream,
3819 							curr_style,
3820 							"%.*s",
3821 							len, start) < 0)
3822 			break;
3823 		    }
3824 
3825 		  if (*curr == '\0')
3826 		    break;
3827 
3828 		  /* Skip over the initial STYLE_MARKER_CHAR.  */
3829 		  ++curr;
3830 
3831 		  /* Update the CURR_STYLE.  As there are fewer than 16
3832 		     styles, it is possible, if the input is corrupted
3833 		     in some way, that we might set CURR_STYLE to an
3834 		     invalid value.  Don't worry though; we check for this
3835 		     situation.  */
3836 		  if (*curr >= '0' && *curr <= '9')
3837 		    curr_style = (enum disassembler_style) (*curr - '0');
3838 		  else if (*curr >= 'a' && *curr <= 'f')
3839 		    curr_style = (enum disassembler_style) (*curr - 'a' + 10);
3840 		  else
3841 		    curr_style = dis_style_text;
3842 
3843 		  /* Check for an invalid style having been selected.  This
3844 		     should never happen, but it doesn't hurt to be a
3845 		     little paranoid.  */
3846 		  if (curr_style > dis_style_comment_start)
3847 		    curr_style = dis_style_text;
3848 
3849 		  /* Skip the hex character, and the closing STYLE_MARKER_CHAR.  */
3850 		  curr += 2;
3851 
3852 		  /* Reset the START to after the style marker.  */
3853 		  start = curr;
3854 		}
3855 	      else
3856 		++curr;
3857 	    }
3858 	  while (true);
3859 	}
3860 
3861       /* Print the comment.  This works because only the last operand ever
3862 	 adds a comment.  If that ever changes then we'll need to be
3863 	 smarter here.  */
3864       if (cmt[0] != '\0')
3865 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3866 				      "\t// %s", cmt);
3867     }
3868 
3869     if (notes && !no_notes)
3870       {
3871 	*has_notes = true;
3872 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3873 				      "  // note: %s", notes);
3874       }
3875 
3876     obstack_free (&content, NULL);
3877 }
3878 
3879 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed.  */
3880 
3881 static void
3882 remove_dot_suffix (char *name, const aarch64_inst *inst)
3883 {
3884   char *ptr;
3885   size_t len;
3886 
3887   ptr = strchr (inst->opcode->name, '.');
3888   assert (ptr && inst->cond);
3889   len = ptr - inst->opcode->name;
3890   assert (len < 8);
3891   strncpy (name, inst->opcode->name, len);
3892   name[len] = '\0';
3893 }
3894 
3895 /* Print the instruction mnemonic name.  */
3896 
3897 static void
3898 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3899 {
3900   if (inst->opcode->flags & F_COND)
3901     {
3902       /* For instructions that are truly conditionally executed, e.g. b.cond,
3903 	 prepare the full mnemonic name with the corresponding condition
3904 	 suffix.  */
3905       char name[8];
3906 
3907       remove_dot_suffix (name, inst);
3908       (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3909 				    "%s.%s", name, inst->cond->names[0]);
3910     }
3911   else
3912     (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3913 				  "%s", inst->opcode->name);
3914 }
3915 
3916 /* Decide whether we need to print a comment after the operands of
3917    instruction INST.  */
3918 
3919 static void
3920 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3921 {
3922   if (inst->opcode->flags & F_COND)
3923     {
3924       char name[8];
3925       unsigned int i, num_conds;
3926 
3927       remove_dot_suffix (name, inst);
3928       num_conds = ARRAY_SIZE (inst->cond->names);
3929       for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3930 	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3931 				      "%s %s.%s",
3932 				      i == 1 ? "  //" : ",",
3933 				      name, inst->cond->names[i]);
3934     }
3935 }
3936 
3937 /* Build notes from verifiers into a string for printing.  */
3938 
3939 static void
3940 print_verifier_notes (aarch64_operand_error *detail,
3941 		      struct disassemble_info *info)
3942 {
3943   if (no_notes)
3944     return;
3945 
3946   /* The output of the verifier cannot be a fatal error, otherwise the assembly
3947      would not have succeeded.  We can safely ignore these.  */
3948   assert (detail->non_fatal);
3949 
3950   (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3951 				"  // note: ");
3952   switch (detail->kind)
3953     {
3954     case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
3955       (*info->fprintf_styled_func) (info->stream, dis_style_text,
3956 				    _("this `%s' should have an immediately"
3957 				      " preceding `%s'"),
3958 				    detail->data[0].s, detail->data[1].s);
3959       break;
3960 
3961     case AARCH64_OPDE_EXPECTED_A_AFTER_B:
3962       (*info->fprintf_styled_func) (info->stream, dis_style_text,
3963 				    _("expected `%s' after previous `%s'"),
3964 				    detail->data[0].s, detail->data[1].s);
3965       break;
3966 
3967     default:
3968       assert (detail->error);
3969       (*info->fprintf_styled_func) (info->stream, dis_style_text,
3970 				    "%s", detail->error);
3971       if (detail->index >= 0)
3972 	(*info->fprintf_styled_func) (info->stream, dis_style_text,
3973 				      " at operand %d", detail->index + 1);
3974       break;
3975     }
3976 }
3977 
3978 /* Print the instruction according to *INST.  */
3979 
3980 static void
3981 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3982 		    const aarch64_insn code,
3983 		    struct disassemble_info *info,
3984 		    aarch64_operand_error *mismatch_details)
3985 {
3986   bool has_notes = false;
3987 
3988   print_mnemonic_name (inst, info);
3989   print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3990   print_comment (inst, info);
3991 
3992   /* We've already printed a note and there is not enough space to print
3993      more, so exit.  Usually notes shouldn't overlap, so we shouldn't get a
3994      note from a register and an instruction at the same time.  */
3995   if (has_notes)
3996     return;
3997 
3998   /* Always run constraint verifiers; this is needed because constraints
3999      need to maintain a global state regardless of whether the instruction
4000      has the flag set or not.  */
4001   enum err_type result = verify_constraints (inst, code, pc, false,
4002 					     mismatch_details, &insn_sequence);
4003   switch (result)
4004     {
4005     case ERR_VFI:
4006       print_verifier_notes (mismatch_details, info);
4007       break;
4008     case ERR_UND:
4009     case ERR_UNP:
4010     case ERR_NYI:
4011     default:
4012       break;
4013     }
4014 }
4015 
4016 /* Entry-point of the instruction disassembler and printer.  */
4017 
4018 static void
4019 print_insn_aarch64_word (bfd_vma pc,
4020 			 uint32_t word,
4021 			 struct disassemble_info *info,
4022 			 aarch64_operand_error *errors)
4023 {
4024   static const char *err_msg[ERR_NR_ENTRIES+1] =
4025     {
4026       [ERR_OK]  = "_",
4027       [ERR_UND] = "undefined",
4028       [ERR_UNP] = "unpredictable",
4029       [ERR_NYI] = "NYI"
4030     };
4031 
4032   enum err_type ret;
4033   aarch64_inst inst;
4034 
4035   info->insn_info_valid = 1;
4036   info->branch_delay_insns = 0;
4037   info->data_size = 0;
4038   info->target = 0;
4039   info->target2 = 0;
4040 
4041   if (info->flags & INSN_HAS_RELOC)
4042     /* If the instruction has a reloc associated with it, then
4043        the offset field in the instruction will actually be the
4044        addend for the reloc.  (If we are using REL type relocs).
4045        In such cases, we can ignore the pc when computing
4046        addresses, since the addend is not currently pc-relative.  */
4047     pc = 0;
4048 
4049   ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
4050 
4051   if (((word >> 21) & 0x3ff) == 1)
4052     {
4053       /* RESERVED for ALES.  */
4054       assert (ret != ERR_OK);
4055       ret = ERR_NYI;
4056     }
4057 
4058   switch (ret)
4059     {
4060     case ERR_UND:
4061     case ERR_UNP:
4062     case ERR_NYI:
4063       /* Handle undefined instructions.  */
4064       info->insn_type = dis_noninsn;
4065       (*info->fprintf_styled_func) (info->stream,
4066 				    dis_style_assembler_directive,
4067 				    ".inst\t");
4068       (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
4069 				    "0x%08x", word);
4070       (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4071 				    " ; %s", err_msg[ret]);
4072       break;
4073     case ERR_OK:
4074       user_friendly_fixup (&inst);
4075       if (inst.opcode->iclass == condbranch
4076 	  || inst.opcode->iclass == testbranch
4077 	  || inst.opcode->iclass == compbranch)
4078         info->insn_type = dis_condbranch;
4079       else if (inst.opcode->iclass == branch_imm)
4080         info->insn_type = dis_jsr;
4081       print_aarch64_insn (pc, &inst, word, info, errors);
4082       break;
4083     default:
4084       abort ();
4085     }
4086 }
4087 
4088 /* Disallow mapping symbols ($x, $d etc) from
4089    being displayed in symbol relative addresses.  */
4090 
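/* For example, "$x", "$d" and "$x.42" are rejected, while an ordinary symbol
   such as "$foo" is still considered valid.  */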
4091 bool
4092 aarch64_symbol_is_valid (asymbol * sym,
4093 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
4094 {
4095   const char * name;
4096 
4097   if (sym == NULL)
4098     return false;
4099 
4100   name = bfd_asymbol_name (sym);
4101 
4102   return name
4103     && (name[0] != '$'
4104 	|| (name[1] != 'x' && name[1] != 'd')
4105 	|| (name[2] != '\0' && name[2] != '.'));
4106 }
4107 
4108 /* Print data bytes on INFO->STREAM.  */
4109 
4110 static void
4111 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
4112 		 uint32_t word,
4113 		 struct disassemble_info *info,
4114 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4115 {
4116   switch (info->bytes_per_chunk)
4117     {
4118     case 1:
4119       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4120 				 ".byte\t");
4121       info->fprintf_styled_func (info->stream, dis_style_immediate,
4122 				 "0x%02x", word);
4123       break;
4124     case 2:
4125       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4126 				 ".short\t");
4127       info->fprintf_styled_func (info->stream, dis_style_immediate,
4128 				 "0x%04x", word);
4129       break;
4130     case 4:
4131       info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4132 				 ".word\t");
4133       info->fprintf_styled_func (info->stream, dis_style_immediate,
4134 				 "0x%08x", word);
4135       break;
4136     default:
4137       abort ();
4138     }
4139 }
4140 
4141 /* Try to infer the code or data type from a symbol.
4142    Returns nonzero if *MAP_TYPE was set.  */
4143 
4144 static int
4145 get_sym_code_type (struct disassemble_info *info, int n,
4146 		   enum map_type *map_type)
4147 {
4148   asymbol * as;
4149   elf_symbol_type *es;
4150   unsigned int type;
4151   const char *name;
4152 
4153   if (n >= info->symtab_size)
4154     return false;
4155 
4156   /* If the symbol is in a different section, ignore it.  */
4157   if (info->section != NULL && info->section != info->symtab[n]->section)
4158     return false;
4159 
4160   as = info->symtab[n];
4161   if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
4162     return false;
4163   es = (elf_symbol_type *) as;
4164 
4165   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
4166 
4167   /* If the symbol has function type then use that.  */
4168   if (type == STT_FUNC)
4169     {
4170       *map_type = MAP_INSN;
4171       return true;
4172     }
4173 
4174   /* Check for mapping symbols.  */
4175   name = bfd_asymbol_name(info->symtab[n]);
4176   if (name[0] == '$'
4177       && (name[1] == 'x' || name[1] == 'd')
4178       && (name[2] == '\0' || name[2] == '.'))
4179     {
4180       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
4181       return true;
4182     }
4183 
4184   return false;
4185 }
4186 
4187 /* Set the feature bits in arch_variant in order to get the correct disassembly
4188    for the chosen architecture variant.
4189 
4190    Currently we only restrict disassembly for Armv8-R and otherwise enable all
4191    non-R-profile features.  */
4192 static void
4193 select_aarch64_variant (unsigned mach)
4194 {
4195   switch (mach)
4196     {
4197     case bfd_mach_aarch64_8R:
4198       AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
4199       break;
4200     default:
4201       arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
4202       AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
4203     }
4204 }
4205 
4206 /* Entry-point of the AArch64 disassembler.  */
4207 
4208 int
4209 print_insn_aarch64 (bfd_vma pc,
4210 		    struct disassemble_info *info)
4211 {
4212   bfd_byte	buffer[INSNLEN];
4213   int		status;
4214   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *,
4215 			    aarch64_operand_error *);
4216   bool   found = false;
4217   unsigned int	size = 4;
4218   unsigned long	data;
4219   aarch64_operand_error errors;
4220   static bool set_features;
4221 
4222   if (info->disassembler_options)
4223     {
4224       set_default_aarch64_dis_options (info);
4225 
4226       parse_aarch64_dis_options (info->disassembler_options);
4227 
4228       /* To avoid repeated parsing of these options, we remove them here.  */
4229       info->disassembler_options = NULL;
4230     }
4231 
4232   if (!set_features)
4233     {
4234       select_aarch64_variant (info->mach);
4235       set_features = true;
4236     }
4237 
4238   /* AArch64 instructions are always little-endian.  */
4239   info->endian_code = BFD_ENDIAN_LITTLE;
4240 
4241   /* Default to DATA.  A text section is required by the ABI to contain an
4242      INSN mapping symbol at the start.  A data section has no such
4243      requirement, hence if no mapping symbol is found the section must
4244      contain only data.  This however isn't very useful if the user has
4245      fully stripped the binaries.  If this is the case use the section
4246      attributes to determine the default.  If we have no section default to
4247      INSN as well, as we may be disassembling some raw bytes on a baremetal
4248      HEX file or similar.  */
4249   enum map_type type = MAP_DATA;
4250   if ((info->section && info->section->flags & SEC_CODE) || !info->section)
4251     type = MAP_INSN;
4252 
4253   /* First check the full symtab for a mapping symbol, even if there
4254      are no usable non-mapping symbols for this address.  */
4255   if (info->symtab_size != 0
4256       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
4257     {
4258       int last_sym = -1;
4259       bfd_vma addr, section_vma = 0;
4260       bool can_use_search_opt_p;
4261       int n;
4262 
4263       if (pc <= last_mapping_addr)
4264 	last_mapping_sym = -1;
4265 
4266       /* Start scanning at the start of the function, or wherever
4267 	 we finished last time.  */
4268       n = info->symtab_pos + 1;
4269 
4270       /* If the last stop offset is different from the current one, it means we
4271 	 are disassembling a different blob of bytes.  As such, the optimization
4272 	 would not be safe and we should start over.  */
4273       can_use_search_opt_p = last_mapping_sym >= 0
4274 			     && info->stop_offset == last_stop_offset;
4275 
4276       if (n >= last_mapping_sym && can_use_search_opt_p)
4277 	n = last_mapping_sym;
4278 
4279       /* Look down while we haven't passed the location being disassembled.
4280 	 The reason for this is that there's no defined order between a symbol
4281 	 and a mapping symbol that may be at the same address.  We may have to
4282 	 look at least one position ahead.  */
4283       for (; n < info->symtab_size; n++)
4284 	{
4285 	  addr = bfd_asymbol_value (info->symtab[n]);
4286 	  if (addr > pc)
4287 	    break;
4288 	  if (get_sym_code_type (info, n, &type))
4289 	    {
4290 	      last_sym = n;
4291 	      found = true;
4292 	    }
4293 	}
4294 
4295       if (!found)
4296 	{
4297 	  n = info->symtab_pos;
4298 	  if (n >= last_mapping_sym && can_use_search_opt_p)
4299 	    n = last_mapping_sym;
4300 
4301 	  /* No mapping symbol found at this address.  Look backwards
4302 	     for a preceding one, but don't go past the section start,
4303 	     otherwise a data section with no mapping symbol can pick up
4304 	     a text mapping symbol of a preceding section.  The documentation
4305 	     says section can be NULL, in which case we will seek up all the
4306 	     way to the top.  */
4307 	  if (info->section)
4308 	    section_vma = info->section->vma;
4309 
4310 	  for (; n >= 0; n--)
4311 	    {
4312 	      addr = bfd_asymbol_value (info->symtab[n]);
4313 	      if (addr < section_vma)
4314 		break;
4315 
4316 	      if (get_sym_code_type (info, n, &type))
4317 		{
4318 		  last_sym = n;
4319 		  found = true;
4320 		  break;
4321 		}
4322 	    }
4323 	}
4324 
4325       last_mapping_sym = last_sym;
4326       last_type = type;
4327       last_stop_offset = info->stop_offset;
4328 
4329       /* Look a little bit ahead to see if we should print out
4330 	 less than four bytes of data.  If there's a symbol,
4331 	 mapping or otherwise, after two bytes then don't
4332 	 print more.  */
4333       if (last_type == MAP_DATA)
4334 	{
4335 	  size = 4 - (pc & 3);
4336 	  for (n = last_sym + 1; n < info->symtab_size; n++)
4337 	    {
4338 	      addr = bfd_asymbol_value (info->symtab[n]);
4339 	      if (addr > pc)
4340 		{
4341 		  if (addr - pc < size)
4342 		    size = addr - pc;
4343 		  break;
4344 		}
4345 	    }
4346 	  /* If the next symbol is after three bytes, we need to
4347 	     print only part of the data, so that we can use either
4348 	     .byte or .short.  */
4349 	  if (size == 3)
4350 	    size = (pc & 1) ? 1 : 2;
4351 	}
4352     }
4353   else
4354     last_type = type;
4355 
4356   /* PR 10263: Disassemble data if requested to do so by the user.  */
4357   if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
4358     {
4359       /* size was set above.  */
4360       info->bytes_per_chunk = size;
4361       info->display_endian = info->endian;
4362       printer = print_insn_data;
4363     }
4364   else
4365     {
4366       info->bytes_per_chunk = size = INSNLEN;
4367       info->display_endian = info->endian_code;
4368       printer = print_insn_aarch64_word;
4369     }
4370 
4371   status = (*info->read_memory_func) (pc, buffer, size, info);
4372   if (status != 0)
4373     {
4374       (*info->memory_error_func) (status, pc, info);
4375       return -1;
4376     }
4377 
4378   data = bfd_get_bits (buffer, size * 8,
4379 		       info->display_endian == BFD_ENDIAN_BIG);
4380 
4381   (*printer) (pc, data, info, &errors);
4382 
4383   return size;
4384 }
4385 
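/* Illustrative sketch, not part of the upstream file: a minimal standalone
   caller that drives print_insn_aarch64 over a raw byte buffer.  It assumes
   the dis-asm.h interface in which init_disassemble_info takes a styled
   printing callback (binutils 2.39 and later) and that print_insn_aarch64
   is declared there; example_styled_printf and the encoded bytes exist only
   for this example.  Note the data-sizing behaviour above: when the current
   mapping type is MAP_DATA, size starts at 4 - (pc & 3), a closer symbol
   shrinks it, and a would-be size of 3 is trimmed to 1 or 2 so the data can
   always be dumped as .byte or .short units.  */
#if 0
#include <stdio.h>
#include <stdarg.h>
#include "dis-asm.h"

/* Styled printing callback that ignores the style and prints plainly.  */
static int
example_styled_printf (void *stream, enum disassembler_style style,
		       const char *fmt, ...)
{
  va_list args;
  int res;

  (void) style;
  va_start (args, fmt);
  res = vfprintf ((FILE *) stream, fmt, args);
  va_end (args);
  return res;
}

int
main (void)
{
  /* "nop; ret", encoded little-endian.  */
  static bfd_byte code[] = { 0x1f, 0x20, 0x03, 0xd5, 0xc0, 0x03, 0x5f, 0xd6 };
  struct disassemble_info info;
  bfd_vma pc = 0;
  int len;

  init_disassemble_info (&info, stdout, (fprintf_ftype) fprintf,
			 example_styled_printf);
  info.arch = bfd_arch_aarch64;
  info.mach = bfd_mach_aarch64;
  info.buffer = code;
  info.buffer_vma = 0;
  info.buffer_length = sizeof (code);
  info.read_memory_func = buffer_read_memory;
  info.disassembler_options = "no-aliases";
  disassemble_init_for_target (&info);

  while (pc < sizeof (code))
    {
      printf ("%4x:\t", (unsigned int) pc);
      len = print_insn_aarch64 (pc, &info);
      printf ("\n");
      if (len <= 0)
	break;
      pc += len;
    }
  return 0;
}
#endif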
4386 void
4387 print_aarch64_disassembler_options (FILE *stream)
4388 {
4389   fprintf (stream, _("\n\
4390 The following AARCH64 specific disassembler options are supported for use\n\
4391 with the -M switch (multiple options should be separated by commas):\n"));
4392 
4393   fprintf (stream, _("\n\
4394   no-aliases         Don't print instruction aliases.\n"));
4395 
4396   fprintf (stream, _("\n\
4397   aliases            Do print instruction aliases.\n"));
4398 
4399   fprintf (stream, _("\n\
4400   no-notes           Don't print instruction notes.\n"));
4401 
4402   fprintf (stream, _("\n\
4403   notes              Do print instruction notes.\n"));
4404 
4405 #ifdef DEBUG_AARCH64
4406   fprintf (stream, _("\n\
4407   debug_dump         Temp switch for debug trace.\n"));
4408 #endif /* DEBUG_AARCH64 */
4409 
4410   fprintf (stream, _("\n"));
4411 }
4412
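/* Illustrative usage note, not part of the upstream file: the options
   listed above are normally passed through objdump's -M switch, e.g.

       objdump -d -M no-aliases,notes foo.o

   A library client gets the same effect by pointing
   info->disassembler_options at a comma-separated string before the first
   call to print_insn_aarch64 (see the sketch after that function); the
   string is parsed once and the pointer is then cleared to avoid repeated
   parsing.  */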