1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2 
3    Copyright (C) 2009-2022 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GAS.
7 
8    GAS is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the license, or
11    (at your option) any later version.
12 
13    GAS is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define	 NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30 
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35 
36 #include "dwarf2dbg.h"
37 
38 /* Types of processor to assemble for.  */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42 
43 #define streq(a, b)	      (strcmp (a, b) == 0)
44 
45 #define END_OF_INSN '\0'
46 
47 static aarch64_feature_set cpu_variant;
48 
49 /* Variables that we set while parsing command-line options.  Once all
50    options have been read we re-process these values to set the real
51    assembly flags.  */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54 
55 /* Constants for known architecture features.  */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57 
58 /* Currently active instruction sequence.  */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60 
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
63 static symbolS *GOT_symbol;
64 
65 /* Which ABI to use.  */
66 enum aarch64_abi_type
67 {
68   AARCH64_ABI_NONE = 0,
69   AARCH64_ABI_LP64 = 1,
70   AARCH64_ABI_ILP32 = 2
71 };
72 
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76 
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
78 static const char *default_arch = DEFAULT_ARCH;
79 
80 /* AArch64 ABI for the output file.  */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82 
83 /* When non-zero, program to a 32-bit model, in which the C data types
84    int, long and all pointer types are 32-bit objects (ILP32); or to a
85    64-bit model, in which the C int type is 32-bits but the C long type
86    and all pointer types are 64-bit objects (LP64).  */
87 #define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89 
90 enum vector_el_type
91 {
92   NT_invtype = -1,
93   NT_b,
94   NT_h,
95   NT_s,
96   NT_d,
97   NT_q,
98   NT_zero,
99   NT_merge
100 };
101 
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103    Values:
104      0 - Horizontal
105      1 - Vertical
106 */
107 enum sme_hv_slice
108 {
109   HV_horizontal = 0,
110   HV_vertical = 1
111 };
112 
113 /* Bits for DEFINED field in vector_type_el.  */
114 #define NTA_HASTYPE     1
115 #define NTA_HASINDEX    2
116 #define NTA_HASVARWIDTH 4
117 
118 struct vector_type_el
119 {
120   enum vector_el_type type;
121   unsigned char defined;
122   unsigned width;
123   int64_t index;
124 };
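/* For illustration (not part of the original source): parsing an operand
   such as "v4.2s[1]" with parse_typed_reg below yields register number 4
   and a vector_type_el of roughly { type = NT_s, width = 2, index = 1,
   defined = NTA_HASTYPE | NTA_HASINDEX }, while an SVE operand like
   "z3.s" records NTA_HASVARWIDTH | NTA_HASTYPE with width 0.  */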
125 
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001
127 
128 struct reloc
129 {
130   bfd_reloc_code_real_type type;
131   expressionS exp;
132   int pc_rel;
133   enum aarch64_opnd opnd;
134   uint32_t flags;
135   unsigned need_libopcodes_p : 1;
136 };
137 
138 struct aarch64_instruction
139 {
140   /* libopcodes structure for instruction intermediate representation.  */
141   aarch64_inst base;
142   /* Record assembly errors found during the parsing.  */
143   struct
144     {
145       enum aarch64_operand_error_kind kind;
146       const char *error;
147     } parsing_error;
148   /* The condition that appears in the assembly line.  */
149   int cond;
150   /* Relocation information (including the GAS internal fixup).  */
151   struct reloc reloc;
152   /* Need to generate an immediate in the literal pool.  */
153   unsigned gen_lit_pool : 1;
154 };
155 
156 typedef struct aarch64_instruction aarch64_instruction;
157 
158 static aarch64_instruction inst;
159 
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162 
163 /* Diagnostics inline function utilities.
164 
165    These are lightweight utilities which should only be called by parse_operands
166    and other parsers.  GAS processes each assembly line by parsing it against
167    instruction template(s); in the case of multiple templates (for the same
168    mnemonic name), those templates are tried one by one until one succeeds or
169    all fail.  An assembly line may fail a few templates before being
170    successfully parsed; an error saved here in most cases is not a user error
171    but an error indicating the current template is not the right template.
172    Therefore it is very important that errors can be saved at a low cost during
173    the parsing; we don't want to slow down the whole parsing by recording
174    non-user errors in detail.
175 
176    Remember that the objective is to help GAS pick up the most appropriate
177    error message in the case of multiple templates, e.g. FMOV which has 8
178    templates.  */
179 
180 static inline void
181 clear_error (void)
182 {
183   inst.parsing_error.kind = AARCH64_OPDE_NIL;
184   inst.parsing_error.error = NULL;
185 }
186 
187 static inline bool
188 error_p (void)
189 {
190   return inst.parsing_error.kind != AARCH64_OPDE_NIL;
191 }
192 
193 static inline const char *
194 get_error_message (void)
195 {
196   return inst.parsing_error.error;
197 }
198 
199 static inline enum aarch64_operand_error_kind
200 get_error_kind (void)
201 {
202   return inst.parsing_error.kind;
203 }
204 
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208   inst.parsing_error.kind = kind;
209   inst.parsing_error.error = error;
210 }
211 
212 static inline void
213 set_recoverable_error (const char *error)
214 {
215   set_error (AARCH64_OPDE_RECOVERABLE, error);
216 }
217 
218 /* Use the DESC field of the corresponding aarch64_operand entry to compose
219    the error message.  */
220 static inline void
221 set_default_error (void)
222 {
223   set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
224 }
225 
226 static inline void
227 set_syntax_error (const char *error)
228 {
229   set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
230 }
231 
232 static inline void
233 set_first_syntax_error (const char *error)
234 {
235   if (! error_p ())
236     set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238 
239 static inline void
240 set_fatal_syntax_error (const char *error)
241 {
242   set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
243 }
244 
245 /* Return value for certain parsers when the parsing fails; those parsers
246    return the information of the parsed result, e.g. register number, on
247    success.  */
248 #define PARSE_FAIL -1
249 
250 /* This is an invalid condition code that means no conditional field is
251    present. */
252 #define COND_ALWAYS 0x10
253 
254 typedef struct
255 {
256   const char *template;
257   uint32_t value;
258 } asm_nzcv;
259 
260 struct reloc_entry
261 {
262   char *name;
263   bfd_reloc_code_real_type reloc;
264 };
265 
266 /* Macros to define the register types and masks for the purpose
267    of parsing.  */
268 
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES	\
271   BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
272   BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
273   BASIC_REG_TYPE(SP_32)	/* wsp     */	\
274   BASIC_REG_TYPE(SP_64)	/* sp      */	\
275   BASIC_REG_TYPE(Z_32)	/* wzr     */	\
276   BASIC_REG_TYPE(Z_64)	/* xzr     */	\
277   BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278   BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
279   BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
280   BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
281   BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
282   BASIC_REG_TYPE(VN)	/* v[0-31] */	\
283   BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
284   BASIC_REG_TYPE(PN)	/* p[0-15] */	\
285   BASIC_REG_TYPE(ZA)	/* za[0-15] */	\
286   BASIC_REG_TYPE(ZAH)	/* za[0-15]h */	\
287   BASIC_REG_TYPE(ZAV)	/* za[0-15]v */	\
288   /* Typecheck: any 64-bit int reg         (inc SP exc XZR).  */	\
289   MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
290   /* Typecheck: same, plus SVE registers.  */				\
291   MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
292 		 | REG_TYPE(ZN))					\
293   /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
294   MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
295 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
296   /* Typecheck: same, plus SVE registers.  */				\
297   MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
298 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
299 		 | REG_TYPE(ZN))					\
300   /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
301   MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
302 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
303   /* Typecheck: any int                    (inc {W}SP inc [WX]ZR).  */	\
304   MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
305 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
306 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) 			\
307   /* Typecheck: any [BHSDQ]P FP.  */					\
308   MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
309 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
310   /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
311   MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
312 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
313 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
314 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
315   /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
316      be used for SVE instructions, since Zn and Pn are valid symbols	\
317      in other contexts.  */						\
318   MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
319 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
320 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
321 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
322 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
323 		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
324   /* Any integer register; used for error messages only.  */		\
325   MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
326 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
327 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
328   /* Pseudo type to mark the end of the enumerator sequence.  */	\
329   BASIC_REG_TYPE(MAX)
330 
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T)	REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)
335 
336 /* Register type enumerators.  */
337 typedef enum aarch64_reg_type_
338 {
339   /* A list of REG_TYPE_*.  */
340   AARCH64_REG_TYPES
341 } aarch64_reg_type;
342 
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T)		(1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V)	V,
349 
350 /* Structure for a hash table entry for a register.  */
351 typedef struct
352 {
353   const char *name;
354   unsigned char number;
355   ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356   unsigned char builtin;
357 } reg_entry;
358 
359 /* Values indexed by aarch64_reg_type to assist the type checking.  */
360 static const unsigned reg_type_masks[] =
361 {
362   AARCH64_REG_TYPES
363 };
364 
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
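/* Sketch of how the X-macro machinery above expands (for clarity; the
   exact values follow from the definitions above): a BASIC_REG_TYPE(T)
   entry contributes the single bit (1 << REG_TYPE_T) to reg_type_masks,
   while a MULTI_REG_TYPE(T,V) entry stores the union V directly, e.g.

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   so each mask lists every basic register kind a parser will accept.  */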
369 
370 /* Diagnostics used when we don't get a register of the expected type.
371    Note:  this has to be synchronized with aarch64_reg_type definitions
372    above.  */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376   const char *msg;
377 
378   switch (reg_type)
379     {
380     case REG_TYPE_R_32:
381       msg = N_("integer 32-bit register expected");
382       break;
383     case REG_TYPE_R_64:
384       msg = N_("integer 64-bit register expected");
385       break;
386     case REG_TYPE_R_N:
387       msg = N_("integer register expected");
388       break;
389     case REG_TYPE_R64_SP:
390       msg = N_("64-bit integer or SP register expected");
391       break;
392     case REG_TYPE_SVE_BASE:
393       msg = N_("base register expected");
394       break;
395     case REG_TYPE_R_Z:
396       msg = N_("integer or zero register expected");
397       break;
398     case REG_TYPE_SVE_OFFSET:
399       msg = N_("offset register expected");
400       break;
401     case REG_TYPE_R_SP:
402       msg = N_("integer or SP register expected");
403       break;
404     case REG_TYPE_R_Z_SP:
405       msg = N_("integer, zero or SP register expected");
406       break;
407     case REG_TYPE_FP_B:
408       msg = N_("8-bit SIMD scalar register expected");
409       break;
410     case REG_TYPE_FP_H:
411       msg = N_("16-bit SIMD scalar or floating-point half precision "
412 	       "register expected");
413       break;
414     case REG_TYPE_FP_S:
415       msg = N_("32-bit SIMD scalar or floating-point single precision "
416 	       "register expected");
417       break;
418     case REG_TYPE_FP_D:
419       msg = N_("64-bit SIMD scalar or floating-point double precision "
420 	       "register expected");
421       break;
422     case REG_TYPE_FP_Q:
423       msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 	       "register expected");
425       break;
426     case REG_TYPE_R_Z_BHSDQ_V:
427     case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428       msg = N_("register expected");
429       break;
430     case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
431       msg = N_("SIMD scalar or floating-point register expected");
432       break;
433     case REG_TYPE_VN:		/* any V reg  */
434       msg = N_("vector register expected");
435       break;
436     case REG_TYPE_ZN:
437       msg = N_("SVE vector register expected");
438       break;
439     case REG_TYPE_PN:
440       msg = N_("SVE predicate register expected");
441       break;
442     default:
443       as_fatal (_("invalid register type %d"), reg_type);
444     }
445   return msg;
446 }
447 
448 /* Some well known registers that we refer to directly elsewhere.  */
449 #define REG_SP	31
450 #define REG_ZR	31
451 
452 /* Instructions take 4 bytes in the object file.  */
453 #define INSN_SIZE	4
454 
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470 
471 /* Stuff needed to resolve the label ambiguity
472    As:
473      ...
474      label:   <insn>
475    may differ from:
476      ...
477      label:
478 	      <insn>  */
479 
480 static symbolS *last_label_seen;
481 
482 /* Literal pool structure.  Held on a per-section
483    and per-sub-section basis.  */
484 
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488   expressionS exp;
489   /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value.  */
490   LITTLENUM_TYPE * bignum;
491 } literal_expression;
492 
493 typedef struct literal_pool
494 {
495   literal_expression literals[MAX_LITERAL_POOL_SIZE];
496   unsigned int next_free_entry;
497   unsigned int id;
498   symbolS *symbol;
499   segT section;
500   subsegT sub_section;
501   int size;
502   struct literal_pool *next;
503 } literal_pool;
504 
505 /* Pointer to a linked list of literal pools.  */
506 static literal_pool *list_of_pools = NULL;
507 
508 /* Pure syntax.	 */
509 
510 /* This array holds the chars that always start a comment.  If the
511    pre-processor is disabled, these aren't very useful.	 */
512 const char comment_chars[] = "";
513 
514 /* This array holds the chars that only start a comment at the beginning of
515    a line.  If the line seems to have the form '# 123 filename'
516    .line and .file directives will appear in the pre-processed output.	*/
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518    first line of the input file.  This is because the compiler outputs
519    #NO_APP at the beginning of its output.  */
520 /* Also note that comments like this one will always work.  */
521 const char line_comment_chars[] = "#";
522 
523 const char line_separator_chars[] = ";";
524 
525 /* Chars that can be used to separate the mantissa
526    from the exponent in floating point numbers.  */
527 const char EXP_CHARS[] = "eE";
528 
529 /* Chars that mean this number is a floating point constant.  */
530 /* As in 0f12.456  */
531 /* or	 0d1.2345e12  */
532 
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534 
535 /* Prefix character that indicates the start of an immediate value.  */
536 #define is_immediate_prefix(C) ((C) == '#')
537 
538 /* Separator character handling.  */
539 
540 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
541 
542 static inline bool
543 skip_past_char (char **str, char c)
544 {
545   if (**str == c)
546     {
547       (*str)++;
548       return true;
549     }
550   else
551     return false;
552 }
553 
554 #define skip_past_comma(str) skip_past_char (str, ',')
555 
556 /* Arithmetic expressions (possibly involving symbols).	 */
557 
558 static bool in_aarch64_get_expression = false;
559 
560 /* Third argument to aarch64_get_expression.  */
561 #define GE_NO_PREFIX  false
562 #define GE_OPT_PREFIX true
563 
564 /* Fourth argument to aarch64_get_expression.  */
565 #define ALLOW_ABSENT  false
566 #define REJECT_ABSENT true
567 
568 /* Fifth argument to aarch64_get_expression.  */
569 #define NORMAL_RESOLUTION false
570 
571 /* Return TRUE if the string pointed to by *STR is successfully parsed
572    as a valid expression; *EP will be filled with the information of
573    such an expression.  Otherwise return FALSE.
574 
575    If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
576    If REJECT_ABSENT is true then treat missing expressions as an error.
577    If DEFER_RESOLUTION is true, then do not resolve expressions against
578    constant symbols.  Necessary if the expression is part of a fixup
579    that uses a reloc that must be emitted.  */
580 
581 static bool
582 aarch64_get_expression (expressionS *  ep,
583 			char **        str,
584 			bool           allow_immediate_prefix,
585 			bool           reject_absent,
586 			bool           defer_resolution)
587 {
588   char *save_in;
589   segT seg;
590   bool prefix_present = false;
591 
592   if (allow_immediate_prefix)
593     {
594       if (is_immediate_prefix (**str))
595 	{
596 	  (*str)++;
597 	  prefix_present = true;
598 	}
599     }
600 
601   memset (ep, 0, sizeof (expressionS));
602 
603   save_in = input_line_pointer;
604   input_line_pointer = *str;
605   in_aarch64_get_expression = true;
606   if (defer_resolution)
607     seg = deferred_expression (ep);
608   else
609     seg = expression (ep);
610   in_aarch64_get_expression = false;
611 
612   if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
613     {
614       /* We found a bad expression in md_operand().  */
615       *str = input_line_pointer;
616       input_line_pointer = save_in;
617       if (prefix_present && ! error_p ())
618 	set_fatal_syntax_error (_("bad expression"));
619       else
620 	set_first_syntax_error (_("bad expression"));
621       return false;
622     }
623 
624 #ifdef OBJ_AOUT
625   if (seg != absolute_section
626       && seg != text_section
627       && seg != data_section
628       && seg != bss_section
629       && seg != undefined_section)
630     {
631       set_syntax_error (_("bad segment"));
632       *str = input_line_pointer;
633       input_line_pointer = save_in;
634       return false;
635     }
636 #else
637   (void) seg;
638 #endif
639 
640   *str = input_line_pointer;
641   input_line_pointer = save_in;
642   return true;
643 }
644 
645 /* Turn a string in input_line_pointer into a floating point constant
646    of type TYPE, and store the appropriate bytes in *LITP.  The number
647    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
648    returned, or NULL on OK.  */
649 
650 const char *
651 md_atof (int type, char *litP, int *sizeP)
652 {
653   return ieee_md_atof (type, litP, sizeP, target_big_endian);
654 }
655 
656 /* We handle all bad expressions here, so that we can report the faulty
657    instruction in the error message.  */
658 void
659 md_operand (expressionS * exp)
660 {
661   if (in_aarch64_get_expression)
662     exp->X_op = O_illegal;
663 }
664 
665 /* Immediate values.  */
666 
667 /* Errors may be set multiple times during parsing or bit encoding
668    (particularly in the Neon bits), but usually the earliest error which is set
669    will be the most meaningful. Avoid overwriting it with later (cascading)
670    errors by calling this function.  */
671 
672 static void
673 first_error (const char *error)
674 {
675   if (! error_p ())
676     set_syntax_error (error);
677 }
678 
679 /* Similar to first_error, but this function accepts a formatted error
680    message.  */
681 static void
682 first_error_fmt (const char *format, ...)
683 {
684   va_list args;
685   enum
686   { size = 100 };
687   /* N.B. this single buffer will not cause error messages for different
688      instructions to pollute each other; this is because at the end of
689      processing of each assembly line, the error message, if any, will be
690      collected by as_bad.  */
691   static char buffer[size];
692 
693   if (! error_p ())
694     {
695       int ret ATTRIBUTE_UNUSED;
696       va_start (args, format);
697       ret = vsnprintf (buffer, size, format, args);
698       know (ret <= size - 1 && ret >= 0);
699       va_end (args);
700       set_syntax_error (buffer);
701     }
702 }
703 
704 /* Register parsing.  */
705 
706 /* Generic register parser which is called by other specialized
707    register parsers.
708    CCP points to what should be the beginning of a register name.
709    If it is indeed a valid register name, advance CCP over it and
710    return the reg_entry structure; otherwise return NULL.
711    It does not issue diagnostics.  */
712 
713 static reg_entry *
714 parse_reg (char **ccp)
715 {
716   char *start = *ccp;
717   char *p;
718   reg_entry *reg;
719 
720 #ifdef REGISTER_PREFIX
721   if (*start != REGISTER_PREFIX)
722     return NULL;
723   start++;
724 #endif
725 
726   p = start;
727   if (!ISALPHA (*p) || !is_name_beginner (*p))
728     return NULL;
729 
730   do
731     p++;
732   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
733 
734   reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
735 
736   if (!reg)
737     return NULL;
738 
739   *ccp = p;
740   return reg;
741 }
742 
743 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
744    otherwise return FALSE.  */
745 static bool
746 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
747 {
748   return (reg_type_masks[type] & (1 << reg->type)) != 0;
749 }
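/* Illustrative use of the check above: aarch64_check_reg_type (reg,
   REG_TYPE_R_Z_SP) accepts w0/x0, wzr/xzr and wsp/sp, because the
   R_Z_SP mask is the union of those basic types, whereas passing
   REG_TYPE_R_64 would accept only x0-x30.  */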
750 
751 /* Try to parse a base or offset register.  Allow SVE base and offset
752    registers if REG_TYPE includes SVE registers.  Return the register
753    entry on success, setting *QUALIFIER to the register qualifier.
754    Return null otherwise.
755 
756    Note that this function does not issue any diagnostics.  */
757 
758 static const reg_entry *
759 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
760 			aarch64_opnd_qualifier_t *qualifier)
761 {
762   char *str = *ccp;
763   const reg_entry *reg = parse_reg (&str);
764 
765   if (reg == NULL)
766     return NULL;
767 
768   switch (reg->type)
769     {
770     case REG_TYPE_R_32:
771     case REG_TYPE_SP_32:
772     case REG_TYPE_Z_32:
773       *qualifier = AARCH64_OPND_QLF_W;
774       break;
775 
776     case REG_TYPE_R_64:
777     case REG_TYPE_SP_64:
778     case REG_TYPE_Z_64:
779       *qualifier = AARCH64_OPND_QLF_X;
780       break;
781 
782     case REG_TYPE_ZN:
783       if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
784 	  || str[0] != '.')
785 	return NULL;
786       switch (TOLOWER (str[1]))
787 	{
788 	case 's':
789 	  *qualifier = AARCH64_OPND_QLF_S_S;
790 	  break;
791 	case 'd':
792 	  *qualifier = AARCH64_OPND_QLF_S_D;
793 	  break;
794 	default:
795 	  return NULL;
796 	}
797       str += 2;
798       break;
799 
800     default:
801       return NULL;
802     }
803 
804   *ccp = str;
805 
806   return reg;
807 }
808 
809 /* Try to parse a base or offset register.  Return the register entry
810    on success, setting *QUALIFIER to the register qualifier.  Return null
811    otherwise.
812 
813    Note that this function does not issue any diagnostics.  */
814 
815 static const reg_entry *
816 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
817 {
818   return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
819 }
820 
821 /* Parse the qualifier of a vector register or vector element of type
822    REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
823    succeeds; otherwise return FALSE.
824 
825    Accept only one occurrence of:
826    4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
827    b h s d q  */
828 static bool
829 parse_vector_type_for_operand (aarch64_reg_type reg_type,
830 			       struct vector_type_el *parsed_type, char **str)
831 {
832   char *ptr = *str;
833   unsigned width;
834   unsigned element_size;
835   enum vector_el_type type;
836 
837   /* skip '.' */
838   gas_assert (*ptr == '.');
839   ptr++;
840 
841   if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
842     {
843       width = 0;
844       goto elt_size;
845     }
846   width = strtoul (ptr, &ptr, 10);
847   if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
848     {
849       first_error_fmt (_("bad size %d in vector width specifier"), width);
850       return false;
851     }
852 
853  elt_size:
854   switch (TOLOWER (*ptr))
855     {
856     case 'b':
857       type = NT_b;
858       element_size = 8;
859       break;
860     case 'h':
861       type = NT_h;
862       element_size = 16;
863       break;
864     case 's':
865       type = NT_s;
866       element_size = 32;
867       break;
868     case 'd':
869       type = NT_d;
870       element_size = 64;
871       break;
872     case 'q':
873       if (reg_type == REG_TYPE_ZN || width == 1)
874 	{
875 	  type = NT_q;
876 	  element_size = 128;
877 	  break;
878 	}
879       /* fall through.  */
880     default:
881       if (*ptr != '\0')
882 	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
883       else
884 	first_error (_("missing element size"));
885       return false;
886     }
887   if (width != 0 && width * element_size != 64
888       && width * element_size != 128
889       && !(width == 2 && element_size == 16)
890       && !(width == 4 && element_size == 8))
891     {
892       first_error_fmt (_
893 		       ("invalid element size %d and vector size combination %c"),
894 		       width, *ptr);
895       return false;
896     }
897   ptr++;
898 
899   parsed_type->type = type;
900   parsed_type->width = width;
901 
902   *str = ptr;
903 
904   return true;
905 }
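/* Worked examples for the parser above (illustrative): ".8b" yields
   NT_b with width 8 (8 x 8 = 64 bits), ".2h" is accepted through the
   special 2 x 16-bit case, and ".1q" yields NT_q with width 1; a
   suffix such as ".3s" fails the width check, and for SVE registers
   (REG_TYPE_ZN) only a bare element size such as ".s" is accepted.  */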
906 
907 /* *STR contains an SVE zero/merge predication suffix.  Parse it into
908    *PARSED_TYPE and point *STR at the end of the suffix.  */
909 
910 static bool
911 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
912 {
913   char *ptr = *str;
914 
915   /* Skip '/'.  */
916   gas_assert (*ptr == '/');
917   ptr++;
918   switch (TOLOWER (*ptr))
919     {
920     case 'z':
921       parsed_type->type = NT_zero;
922       break;
923     case 'm':
924       parsed_type->type = NT_merge;
925       break;
926     default:
927       if (*ptr != '\0' && *ptr != ',')
928 	first_error_fmt (_("unexpected character `%c' in predication type"),
929 			 *ptr);
930       else
931 	first_error (_("missing predication type"));
932       return false;
933     }
934   parsed_type->width = 0;
935   *str = ptr + 1;
936   return true;
937 }
938 
939 /* Parse a register of the type TYPE.
940 
941    Return PARSE_FAIL if the string pointed by *CCP is not a valid register
942    name or the parsed register is not of TYPE.
943 
944    Otherwise return the register number, and optionally fill in the actual
945    type of the register in *RTYPE when multiple alternatives were given, and
946    return the register shape and element index information in *TYPEINFO.
947 
948    IN_REG_LIST should be set with TRUE if the caller is parsing a register
949    list.  */
950 
951 static int
952 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
953 		 struct vector_type_el *typeinfo, bool in_reg_list)
954 {
955   char *str = *ccp;
956   const reg_entry *reg = parse_reg (&str);
957   struct vector_type_el atype;
958   struct vector_type_el parsetype;
959   bool is_typed_vecreg = false;
960 
961   atype.defined = 0;
962   atype.type = NT_invtype;
963   atype.width = -1;
964   atype.index = 0;
965 
966   if (reg == NULL)
967     {
968       if (typeinfo)
969 	*typeinfo = atype;
970       set_default_error ();
971       return PARSE_FAIL;
972     }
973 
974   if (! aarch64_check_reg_type (reg, type))
975     {
976       DEBUG_TRACE ("reg type check failed");
977       set_default_error ();
978       return PARSE_FAIL;
979     }
980   type = reg->type;
981 
982   if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
983       && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
984     {
985       if (*str == '.')
986 	{
987 	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
988 	    return PARSE_FAIL;
989 	}
990       else
991 	{
992 	  if (!parse_predication_for_operand (&parsetype, &str))
993 	    return PARSE_FAIL;
994 	}
995 
996       /* Register is of the form Vn.[bhsdq].  */
997       is_typed_vecreg = true;
998 
999       if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1000 	{
1001 	  /* The width is always variable; we don't allow an integer width
1002 	     to be specified.  */
1003 	  gas_assert (parsetype.width == 0);
1004 	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1005 	}
1006       else if (parsetype.width == 0)
1007 	/* Expect index. In the new scheme we cannot have
1008 	   Vn.[bhsdq] represent a scalar. Therefore any
1009 	   Vn.[bhsdq] should have an index following it.
1010 	   Except in reglists of course.  */
1011 	atype.defined |= NTA_HASINDEX;
1012       else
1013 	atype.defined |= NTA_HASTYPE;
1014 
1015       atype.type = parsetype.type;
1016       atype.width = parsetype.width;
1017     }
1018 
1019   if (skip_past_char (&str, '['))
1020     {
1021       expressionS exp;
1022 
1023       /* Reject Sn[index] syntax.  */
1024       if (!is_typed_vecreg)
1025 	{
1026 	  first_error (_("this type of register can't be indexed"));
1027 	  return PARSE_FAIL;
1028 	}
1029 
1030       if (in_reg_list)
1031 	{
1032 	  first_error (_("index not allowed inside register list"));
1033 	  return PARSE_FAIL;
1034 	}
1035 
1036       atype.defined |= NTA_HASINDEX;
1037 
1038       aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1039 			      NORMAL_RESOLUTION);
1040 
1041       if (exp.X_op != O_constant)
1042 	{
1043 	  first_error (_("constant expression required"));
1044 	  return PARSE_FAIL;
1045 	}
1046 
1047       if (! skip_past_char (&str, ']'))
1048 	return PARSE_FAIL;
1049 
1050       atype.index = exp.X_add_number;
1051     }
1052   else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1053     {
1054       /* Indexed vector register expected.  */
1055       first_error (_("indexed vector register expected"));
1056       return PARSE_FAIL;
1057     }
1058 
1059   /* A vector reg Vn should be typed or indexed.  */
1060   if (type == REG_TYPE_VN && atype.defined == 0)
1061     {
1062       first_error (_("invalid use of vector register"));
1063     }
1064 
1065   if (typeinfo)
1066     *typeinfo = atype;
1067 
1068   if (rtype)
1069     *rtype = type;
1070 
1071   *ccp = str;
1072 
1073   return reg->number;
1074 }
1075 
1076 /* Parse register.
1077 
1078    Return the register number on success; return PARSE_FAIL otherwise.
1079 
1080    If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1081    the register (e.g. NEON double or quad reg when either has been requested).
1082 
1083    If this is a NEON vector register with additional type information, fill
1084    in the struct pointed to by VECTYPE (if non-NULL).
1085 
1086    This parser does not handle register lists.  */
1087 
1088 static int
1089 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1090 		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
1091 {
1092   struct vector_type_el atype;
1093   char *str = *ccp;
1094   int reg = parse_typed_reg (&str, type, rtype, &atype,
1095 			     /*in_reg_list= */ false);
1096 
1097   if (reg == PARSE_FAIL)
1098     return PARSE_FAIL;
1099 
1100   if (vectype)
1101     *vectype = atype;
1102 
1103   *ccp = str;
1104 
1105   return reg;
1106 }
1107 
1108 static inline bool
1109 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1110 {
1111   return
1112     e1.type == e2.type
1113     && e1.defined == e2.defined
1114     && e1.width == e2.width && e1.index == e2.index;
1115 }
1116 
1117 /* This function parses a list of vector registers of type TYPE.
1118    On success, it returns the parsed register list information in the
1119    following encoded format:
1120 
1121    bit   17-21   |   12-16   |   7-11    |    2-6    |   0-1
1122        4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1123 
1124    The information of the register shape and/or index is returned in
1125    *VECTYPE.
1126 
1127    It returns PARSE_FAIL if the register list is invalid.
1128 
1129    The list contains one to four registers.
1130    Each register can be one of:
1131    <Vt>.<T>[<index>]
1132    <Vt>.<T>
1133    All <T> should be identical.
1134    All <index> should be identical.
1135    There are restrictions on <Vt> numbers which are checked later
1136    (by reg_list_valid_p).  */
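/* For example (illustrative only): "{v4.4s, v5.4s}" parses to two
   registers, so the function returns (4 << 2) | (5 << 7) | 1: the
   first register number sits in bits 2-6, the second in bits 7-11,
   and bits 0-1 hold the register count minus one, matching the
   layout described above.  */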
1137 
1138 static int
1139 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1140 		       struct vector_type_el *vectype)
1141 {
1142   char *str = *ccp;
1143   int nb_regs;
1144   struct vector_type_el typeinfo, typeinfo_first;
1145   int val, val_range;
1146   int in_range;
1147   int ret_val;
1148   int i;
1149   bool error = false;
1150   bool expect_index = false;
1151 
1152   if (*str != '{')
1153     {
1154       set_syntax_error (_("expecting {"));
1155       return PARSE_FAIL;
1156     }
1157   str++;
1158 
1159   nb_regs = 0;
1160   typeinfo_first.defined = 0;
1161   typeinfo_first.type = NT_invtype;
1162   typeinfo_first.width = -1;
1163   typeinfo_first.index = 0;
1164   ret_val = 0;
1165   val = -1;
1166   val_range = -1;
1167   in_range = 0;
1168   do
1169     {
1170       if (in_range)
1171 	{
1172 	  str++;		/* skip over '-' */
1173 	  val_range = val;
1174 	}
1175       val = parse_typed_reg (&str, type, NULL, &typeinfo,
1176 			     /*in_reg_list= */ true);
1177       if (val == PARSE_FAIL)
1178 	{
1179 	  set_first_syntax_error (_("invalid vector register in list"));
1180 	  error = true;
1181 	  continue;
1182 	}
1183       /* reject [bhsd]n */
1184       if (type == REG_TYPE_VN && typeinfo.defined == 0)
1185 	{
1186 	  set_first_syntax_error (_("invalid scalar register in list"));
1187 	  error = true;
1188 	  continue;
1189 	}
1190 
1191       if (typeinfo.defined & NTA_HASINDEX)
1192 	expect_index = true;
1193 
1194       if (in_range)
1195 	{
1196 	  if (val < val_range)
1197 	    {
1198 	      set_first_syntax_error
1199 		(_("invalid range in vector register list"));
1200 	      error = true;
1201 	    }
1202 	  val_range++;
1203 	}
1204       else
1205 	{
1206 	  val_range = val;
1207 	  if (nb_regs == 0)
1208 	    typeinfo_first = typeinfo;
1209 	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1210 	    {
1211 	      set_first_syntax_error
1212 		(_("type mismatch in vector register list"));
1213 	      error = true;
1214 	    }
1215 	}
1216       if (! error)
1217 	for (i = val_range; i <= val; i++)
1218 	  {
1219 	    ret_val |= i << (5 * nb_regs);
1220 	    nb_regs++;
1221 	  }
1222       in_range = 0;
1223     }
1224   while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1225 
1226   skip_whitespace (str);
1227   if (*str != '}')
1228     {
1229       set_first_syntax_error (_("end of vector register list not found"));
1230       error = true;
1231     }
1232   str++;
1233 
1234   skip_whitespace (str);
1235 
1236   if (expect_index)
1237     {
1238       if (skip_past_char (&str, '['))
1239 	{
1240 	  expressionS exp;
1241 
1242 	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1243 				  NORMAL_RESOLUTION);
1244 	  if (exp.X_op != O_constant)
1245 	    {
1246 	      set_first_syntax_error (_("constant expression required."));
1247 	      error = true;
1248 	    }
1249 	  if (! skip_past_char (&str, ']'))
1250 	    error = true;
1251 	  else
1252 	    typeinfo_first.index = exp.X_add_number;
1253 	}
1254       else
1255 	{
1256 	  set_first_syntax_error (_("expected index"));
1257 	  error = true;
1258 	}
1259     }
1260 
1261   if (nb_regs > 4)
1262     {
1263       set_first_syntax_error (_("too many registers in vector register list"));
1264       error = true;
1265     }
1266   else if (nb_regs == 0)
1267     {
1268       set_first_syntax_error (_("empty vector register list"));
1269       error = true;
1270     }
1271 
1272   *ccp = str;
1273   if (! error)
1274     *vectype = typeinfo_first;
1275 
1276   return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1277 }
1278 
1279 /* Directives: register aliases.  */
1280 
1281 static reg_entry *
1282 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1283 {
1284   reg_entry *new;
1285   const char *name;
1286 
1287   if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1288     {
1289       if (new->builtin)
1290 	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1291 		 str);
1292 
1293       /* Only warn about a redefinition if it's not defined as the
1294          same register.  */
1295       else if (new->number != number || new->type != type)
1296 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
1297 
1298       return NULL;
1299     }
1300 
1301   name = xstrdup (str);
1302   new = XNEW (reg_entry);
1303 
1304   new->name = name;
1305   new->number = number;
1306   new->type = type;
1307   new->builtin = false;
1308 
1309   str_hash_insert (aarch64_reg_hsh, name, new, 0);
1310 
1311   return new;
1312 }
1313 
1314 /* Look for the .req directive.	 This is of the form:
1315 
1316 	new_register_name .req existing_register_name
1317 
1318    If we find one, or if it looks sufficiently like one that we want to
1319    handle any error here, return TRUE.  Otherwise return FALSE.  */
1320 
1321 static bool
1322 create_register_alias (char *newname, char *p)
1323 {
1324   const reg_entry *old;
1325   char *oldname, *nbuf;
1326   size_t nlen;
1327 
1328   /* The input scrubber ensures that whitespace after the mnemonic is
1329      collapsed to single spaces.  */
1330   oldname = p;
1331   if (!startswith (oldname, " .req "))
1332     return false;
1333 
1334   oldname += 6;
1335   if (*oldname == '\0')
1336     return false;
1337 
1338   old = str_hash_find (aarch64_reg_hsh, oldname);
1339   if (!old)
1340     {
1341       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1342       return true;
1343     }
1344 
1345   /* If TC_CASE_SENSITIVE is defined, then newname already points to
1346      the desired alias name, and p points to its end.  If not, then
1347      the desired alias name is in the global original_case_string.  */
1348 #ifdef TC_CASE_SENSITIVE
1349   nlen = p - newname;
1350 #else
1351   newname = original_case_string;
1352   nlen = strlen (newname);
1353 #endif
1354 
1355   nbuf = xmemdup0 (newname, nlen);
1356 
1357   /* Create aliases under the new name as stated; an all-lowercase
1358      version of the new name; and an all-uppercase version of the new
1359      name.  */
1360   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1361     {
1362       for (p = nbuf; *p; p++)
1363 	*p = TOUPPER (*p);
1364 
1365       if (strncmp (nbuf, newname, nlen))
1366 	{
1367 	  /* If this attempt to create an additional alias fails, do not bother
1368 	     trying to create the all-lower case alias.  We will fail and issue
1369 	     a second, duplicate error message.  This situation arises when the
1370 	     programmer does something like:
1371 	     foo .req r0
1372 	     Foo .req r1
1373 	     The second .req creates the "Foo" alias but then fails to create
1374 	     the artificial FOO alias because it has already been created by the
1375 	     first .req.  */
1376 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1377 	    {
1378 	      free (nbuf);
1379 	      return true;
1380 	    }
1381 	}
1382 
1383       for (p = nbuf; *p; p++)
1384 	*p = TOLOWER (*p);
1385 
1386       if (strncmp (nbuf, newname, nlen))
1387 	insert_reg_alias (nbuf, old->number, old->type);
1388     }
1389 
1390   free (nbuf);
1391   return true;
1392 }
1393 
1394 /* Should never be called, as .req goes between the alias and the
1395    register name, not at the beginning of the line.  */
1396 static void
1397 s_req (int a ATTRIBUTE_UNUSED)
1398 {
1399   as_bad (_("invalid syntax for .req directive"));
1400 }
1401 
1402 /* The .unreq directive deletes an alias which was previously defined
1403    by .req.  For example:
1404 
1405        my_alias .req r11
1406        .unreq my_alias	  */
1407 
1408 static void
1409 s_unreq (int a ATTRIBUTE_UNUSED)
1410 {
1411   char *name;
1412   char saved_char;
1413 
1414   name = input_line_pointer;
1415 
1416   while (*input_line_pointer != 0
1417 	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1418     ++input_line_pointer;
1419 
1420   saved_char = *input_line_pointer;
1421   *input_line_pointer = 0;
1422 
1423   if (!*name)
1424     as_bad (_("invalid syntax for .unreq directive"));
1425   else
1426     {
1427       reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1428 
1429       if (!reg)
1430 	as_bad (_("unknown register alias '%s'"), name);
1431       else if (reg->builtin)
1432 	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1433 		 name);
1434       else
1435 	{
1436 	  char *p;
1437 	  char *nbuf;
1438 
1439 	  str_hash_delete (aarch64_reg_hsh, name);
1440 	  free ((char *) reg->name);
1441 	  free (reg);
1442 
1443 	  /* Also locate the all upper case and all lower case versions.
1444 	     Do not complain if we cannot find one or the other as it
1445 	     was probably deleted above.  */
1446 
1447 	  nbuf = strdup (name);
1448 	  for (p = nbuf; *p; p++)
1449 	    *p = TOUPPER (*p);
1450 	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
1451 	  if (reg)
1452 	    {
1453 	      str_hash_delete (aarch64_reg_hsh, nbuf);
1454 	      free ((char *) reg->name);
1455 	      free (reg);
1456 	    }
1457 
1458 	  for (p = nbuf; *p; p++)
1459 	    *p = TOLOWER (*p);
1460 	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
1461 	  if (reg)
1462 	    {
1463 	      str_hash_delete (aarch64_reg_hsh, nbuf);
1464 	      free ((char *) reg->name);
1465 	      free (reg);
1466 	    }
1467 
1468 	  free (nbuf);
1469 	}
1470     }
1471 
1472   *input_line_pointer = saved_char;
1473   demand_empty_rest_of_line ();
1474 }
1475 
1476 /* Directives: Instruction set selection.  */
1477 
1478 #ifdef OBJ_ELF
1479 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1480    spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1481    Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1482    and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
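/* Rough illustration (not from the original comment): assembling

       add   w0, w0, #1      // "$x" mapping symbol emitted at this offset
       .word 0x12345678      // "$d" mapping symbol emitted here

   makes the state transitions below place an untyped local $x symbol
   before the instruction bytes and a $d symbol before the data bytes,
   so that disassemblers can tell code from literal data.  */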
1483 
1484 /* Create a new mapping symbol for the transition to STATE.  */
1485 
1486 static void
1487 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1488 {
1489   symbolS *symbolP;
1490   const char *symname;
1491   int type;
1492 
1493   switch (state)
1494     {
1495     case MAP_DATA:
1496       symname = "$d";
1497       type = BSF_NO_FLAGS;
1498       break;
1499     case MAP_INSN:
1500       symname = "$x";
1501       type = BSF_NO_FLAGS;
1502       break;
1503     default:
1504       abort ();
1505     }
1506 
1507   symbolP = symbol_new (symname, now_seg, frag, value);
1508   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1509 
1510   /* Save the mapping symbols for future reference.  Also check that
1511      we do not place two mapping symbols at the same offset within a
1512      frag.  We'll handle overlap between frags in
1513      check_mapping_symbols.
1514 
1515      If .fill or other data filling directive generates zero sized data,
1516      the mapping symbol for the following code will have the same value
1517      as the one generated for the data filling directive.  In this case,
1518      we replace the old symbol with the new one at the same address.  */
1519   if (value == 0)
1520     {
1521       if (frag->tc_frag_data.first_map != NULL)
1522 	{
1523 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1524 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1525 			 &symbol_lastP);
1526 	}
1527       frag->tc_frag_data.first_map = symbolP;
1528     }
1529   if (frag->tc_frag_data.last_map != NULL)
1530     {
1531       know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1532 	    S_GET_VALUE (symbolP));
1533       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1534 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1535 		       &symbol_lastP);
1536     }
1537   frag->tc_frag_data.last_map = symbolP;
1538 }
1539 
1540 /* We must sometimes convert a region marked as code to data during
1541    code alignment, if an odd number of bytes have to be padded.  The
1542    code mapping symbol is pushed to an aligned address.  */
1543 
1544 static void
1545 insert_data_mapping_symbol (enum mstate state,
1546 			    valueT value, fragS * frag, offsetT bytes)
1547 {
1548   /* If there was already a mapping symbol, remove it.  */
1549   if (frag->tc_frag_data.last_map != NULL
1550       && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1551       frag->fr_address + value)
1552     {
1553       symbolS *symp = frag->tc_frag_data.last_map;
1554 
1555       if (value == 0)
1556 	{
1557 	  know (frag->tc_frag_data.first_map == symp);
1558 	  frag->tc_frag_data.first_map = NULL;
1559 	}
1560       frag->tc_frag_data.last_map = NULL;
1561       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1562     }
1563 
1564   make_mapping_symbol (MAP_DATA, value, frag);
1565   make_mapping_symbol (state, value + bytes, frag);
1566 }
1567 
1568 static void mapping_state_2 (enum mstate state, int max_chars);
1569 
1570 /* Set the mapping state to STATE.  Only call this when about to
1571    emit some STATE bytes to the file.  */
1572 
1573 void
1574 mapping_state (enum mstate state)
1575 {
1576   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1577 
1578   if (state == MAP_INSN)
1579     /* AArch64 instructions require 4-byte alignment.  When emitting
1580        instructions into any section, record the appropriate section
1581        alignment.  */
1582     record_alignment (now_seg, 2);
1583 
1584   if (mapstate == state)
1585     /* The mapping symbol has already been emitted.
1586        There is nothing else to do.  */
1587     return;
1588 
1589 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1590   if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1591     /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
1592        evaluated later in the next else.  */
1593     return;
1594   else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1595     {
1596       /* Only add the symbol if the offset is > 0:
1597 	 if we're at the first frag, check its size > 0;
1598 	 if we're not at the first frag, then for sure
1599 	 the offset is > 0.  */
1600       struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1601       const int add_symbol = (frag_now != frag_first)
1602 	|| (frag_now_fix () > 0);
1603 
1604       if (add_symbol)
1605 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1606     }
1607 #undef TRANSITION
1608 
1609   mapping_state_2 (state, 0);
1610 }
1611 
1612 /* Same as mapping_state, but MAX_CHARS bytes have already been
1613    allocated.  Put the mapping symbol that far back.  */
1614 
1615 static void
1616 mapping_state_2 (enum mstate state, int max_chars)
1617 {
1618   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1619 
1620   if (!SEG_NORMAL (now_seg))
1621     return;
1622 
1623   if (mapstate == state)
1624     /* The mapping symbol has already been emitted.
1625        There is nothing else to do.  */
1626     return;
1627 
1628   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1629   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1630 }
1631 #else
1632 #define mapping_state(x)	/* nothing */
1633 #define mapping_state_2(x, y)	/* nothing */
1634 #endif
1635 
1636 /* Directives: sectioning and alignment.  */
1637 
1638 static void
1639 s_bss (int ignore ATTRIBUTE_UNUSED)
1640 {
1641   /* We don't support putting frags in the BSS segment, we fake it by
1642      marking in_bss, then looking at s_skip for clues.  */
1643   subseg_set (bss_section, 0);
1644   demand_empty_rest_of_line ();
1645   mapping_state (MAP_DATA);
1646 }
1647 
1648 static void
1649 s_even (int ignore ATTRIBUTE_UNUSED)
1650 {
1651   /* Never make frag if expect extra pass.  */
1652   if (!need_pass_2)
1653     frag_align (1, 0, 0);
1654 
1655   record_alignment (now_seg, 1);
1656 
1657   demand_empty_rest_of_line ();
1658 }
1659 
1660 /* Directives: Literal pools.  */
1661 
1662 static literal_pool *
1663 find_literal_pool (int size)
1664 {
1665   literal_pool *pool;
1666 
1667   for (pool = list_of_pools; pool != NULL; pool = pool->next)
1668     {
1669       if (pool->section == now_seg
1670 	  && pool->sub_section == now_subseg && pool->size == size)
1671 	break;
1672     }
1673 
1674   return pool;
1675 }
1676 
1677 static literal_pool *
1678 find_or_make_literal_pool (int size)
1679 {
1680   /* Next literal pool ID number.  */
1681   static unsigned int latest_pool_num = 1;
1682   literal_pool *pool;
1683 
1684   pool = find_literal_pool (size);
1685 
1686   if (pool == NULL)
1687     {
1688       /* Create a new pool.  */
1689       pool = XNEW (literal_pool);
1690       if (!pool)
1691 	return NULL;
1692 
1693       /* Currently we always put the literal pool in the current text
1694          section.  If we were generating "small" model code where we
1695          knew that all code and initialised data was within 1MB then
1696          we could output literals to mergeable, read-only data
1697          sections. */
1698 
1699       pool->next_free_entry = 0;
1700       pool->section = now_seg;
1701       pool->sub_section = now_subseg;
1702       pool->size = size;
1703       pool->next = list_of_pools;
1704       pool->symbol = NULL;
1705 
1706       /* Add it to the list.  */
1707       list_of_pools = pool;
1708     }
1709 
1710   /* New pools, and emptied pools, will have a NULL symbol.  */
1711   if (pool->symbol == NULL)
1712     {
1713       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1714 				    &zero_address_frag, 0);
1715       pool->id = latest_pool_num++;
1716     }
1717 
1718   /* Done.  */
1719   return pool;
1720 }
1721 
1722 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1723    Return TRUE on success, otherwise return FALSE.  */
1724 static bool
1725 add_to_lit_pool (expressionS *exp, int size)
1726 {
1727   literal_pool *pool;
1728   unsigned int entry;
1729 
1730   pool = find_or_make_literal_pool (size);
1731 
1732   /* Check if this literal value is already in the pool.  */
1733   for (entry = 0; entry < pool->next_free_entry; entry++)
1734     {
1735       expressionS * litexp = & pool->literals[entry].exp;
1736 
1737       if ((litexp->X_op == exp->X_op)
1738 	  && (exp->X_op == O_constant)
1739 	  && (litexp->X_add_number == exp->X_add_number)
1740 	  && (litexp->X_unsigned == exp->X_unsigned))
1741 	break;
1742 
1743       if ((litexp->X_op == exp->X_op)
1744 	  && (exp->X_op == O_symbol)
1745 	  && (litexp->X_add_number == exp->X_add_number)
1746 	  && (litexp->X_add_symbol == exp->X_add_symbol)
1747 	  && (litexp->X_op_symbol == exp->X_op_symbol))
1748 	break;
1749     }
1750 
1751   /* Do we need to create a new entry?  */
1752   if (entry == pool->next_free_entry)
1753     {
1754       if (entry >= MAX_LITERAL_POOL_SIZE)
1755 	{
1756 	  set_syntax_error (_("literal pool overflow"));
1757 	  return false;
1758 	}
1759 
1760       pool->literals[entry].exp = *exp;
1761       pool->next_free_entry += 1;
1762       if (exp->X_op == O_big)
1763 	{
1764 	  /* PR 16688: Bignums are held in a single global array.  We must
1765 	     copy and preserve that value now, before it is overwritten.  */
1766 	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1767 						  exp->X_add_number);
1768 	  memcpy (pool->literals[entry].bignum, generic_bignum,
1769 		  CHARS_PER_LITTLENUM * exp->X_add_number);
1770 	}
1771       else
1772 	pool->literals[entry].bignum = NULL;
1773     }
1774 
1775   exp->X_op = O_symbol;
1776   exp->X_add_number = ((int) entry) * size;
1777   exp->X_add_symbol = pool->symbol;
1778 
1779   return true;
1780 }
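
/* Example of the rewrite performed above: if the 4-byte pool already holds
   one entry and a new constant 0x12345678 is added, the caller's expression
   comes back as "pool->symbol + 4" (an O_symbol reference to offset 4 within
   the pool); the pool contents themselves are emitted later by a .ltorg or
   .pool directive (see s_ltorg below).  */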
1781 
1782 /* Can't use symbol_new here, so have to create a symbol and then at
1783    a later date assign it a value. That's what these functions do.  */
1784 
1785 static void
1786 symbol_locate (symbolS * symbolP,
1787 	       const char *name,/* It is copied, the caller can modify.  */
1788 	       segT segment,	/* Segment identifier (SEG_<something>).  */
1789 	       valueT valu,	/* Symbol value.  */
1790 	       fragS * frag)	/* Associated fragment.  */
1791 {
1792   size_t name_length;
1793   char *preserved_copy_of_name;
1794 
1795   name_length = strlen (name) + 1;	/* +1 for \0.  */
1796   obstack_grow (&notes, name, name_length);
1797   preserved_copy_of_name = obstack_finish (&notes);
1798 
1799 #ifdef tc_canonicalize_symbol_name
1800   preserved_copy_of_name =
1801     tc_canonicalize_symbol_name (preserved_copy_of_name);
1802 #endif
1803 
1804   S_SET_NAME (symbolP, preserved_copy_of_name);
1805 
1806   S_SET_SEGMENT (symbolP, segment);
1807   S_SET_VALUE (symbolP, valu);
1808   symbol_clear_list_pointers (symbolP);
1809 
1810   symbol_set_frag (symbolP, frag);
1811 
1812   /* Link to end of symbol chain.  */
1813   {
1814     extern int symbol_table_frozen;
1815 
1816     if (symbol_table_frozen)
1817       abort ();
1818   }
1819 
1820   symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1821 
1822   obj_symbol_new_hook (symbolP);
1823 
1824 #ifdef tc_symbol_new_hook
1825   tc_symbol_new_hook (symbolP);
1826 #endif
1827 
1828 #ifdef DEBUG_SYMS
1829   verify_symbol_chain (symbol_rootP, symbol_lastP);
1830 #endif /* DEBUG_SYMS  */
1831 }
1832 
1833 
1834 static void
1835 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1836 {
1837   unsigned int entry;
1838   literal_pool *pool;
1839   char sym_name[20];
1840   int align;
1841 
1842   for (align = 2; align <= 4; align++)
1843     {
1844       int size = 1 << align;
1845 
1846       pool = find_literal_pool (size);
1847       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1848 	continue;
1849 
1850       /* Align the pool to the size of its entries.
1851          Only make a frag if we have to.  */
1852       if (!need_pass_2)
1853 	frag_align (align, 0, 0);
1854 
1855       mapping_state (MAP_DATA);
1856 
1857       record_alignment (now_seg, align);
1858 
1859       sprintf (sym_name, "$$lit_\002%x", pool->id);
1860 
1861       symbol_locate (pool->symbol, sym_name, now_seg,
1862 		     (valueT) frag_now_fix (), frag_now);
1863       symbol_table_insert (pool->symbol);
1864 
1865       for (entry = 0; entry < pool->next_free_entry; entry++)
1866 	{
1867 	  expressionS * exp = & pool->literals[entry].exp;
1868 
1869 	  if (exp->X_op == O_big)
1870 	    {
1871 	      /* PR 16688: Restore the global bignum value.  */
1872 	      gas_assert (pool->literals[entry].bignum != NULL);
1873 	      memcpy (generic_bignum, pool->literals[entry].bignum,
1874 		      CHARS_PER_LITTLENUM * exp->X_add_number);
1875 	    }
1876 
1877 	  /* First output the expression in the instruction to the pool.  */
1878 	  emit_expr (exp, size);	/* .word|.xword  */
1879 
1880 	  if (exp->X_op == O_big)
1881 	    {
1882 	      free (pool->literals[entry].bignum);
1883 	      pool->literals[entry].bignum = NULL;
1884 	    }
1885 	}
1886 
1887       /* Mark the pool as empty.  */
1888       pool->next_free_entry = 0;
1889       pool->symbol = NULL;
1890     }
1891 }
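
/* A typical use of the literal pool machinery handled above might look like:

	ldr	w0, =0x12345678		// value placed in a nearby pool
	...
	.ltorg				// dump pending literals here

   s_ltorg walks the 4-, 8- and 16-byte pools, aligns each to its entry size,
   switches the mapping state to MAP_DATA, emits every pending entry with
   emit_expr and then marks the pool empty again.  */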
1892 
1893 #ifdef OBJ_ELF
1894 /* Forward declarations for functions below, in the MD interface
1895    section.  */
1896 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1897 static struct reloc_table_entry * find_reloc_table_entry (char **);
1898 
1899 /* Directives: Data.  */
1900 /* N.B. the support for relocation suffix in this directive needs to be
1901    implemented properly.  */
1902 
1903 static void
1904 s_aarch64_elf_cons (int nbytes)
1905 {
1906   expressionS exp;
1907 
1908 #ifdef md_flush_pending_output
1909   md_flush_pending_output ();
1910 #endif
1911 
1912   if (is_it_end_of_statement ())
1913     {
1914       demand_empty_rest_of_line ();
1915       return;
1916     }
1917 
1918 #ifdef md_cons_align
1919   md_cons_align (nbytes);
1920 #endif
1921 
1922   mapping_state (MAP_DATA);
1923   do
1924     {
1925       struct reloc_table_entry *reloc;
1926 
1927       expression (&exp);
1928 
1929       if (exp.X_op != O_symbol)
1930 	emit_expr (&exp, (unsigned int) nbytes);
1931       else
1932 	{
1933 	  skip_past_char (&input_line_pointer, '#');
1934 	  if (skip_past_char (&input_line_pointer, ':'))
1935 	    {
1936 	      reloc = find_reloc_table_entry (&input_line_pointer);
1937 	      if (reloc == NULL)
1938 		as_bad (_("unrecognized relocation suffix"));
1939 	      else
1940 		as_bad (_("unimplemented relocation suffix"));
1941 	      ignore_rest_of_line ();
1942 	      return;
1943 	    }
1944 	  else
1945 	    emit_expr (&exp, (unsigned int) nbytes);
1946 	}
1947     }
1948   while (*input_line_pointer++ == ',');
1949 
1950   /* Put terminator back into stream.  */
1951   input_line_pointer--;
1952   demand_empty_rest_of_line ();
1953 }
1954 
1955 /* Mark symbol that it follows a variant PCS convention.  */
1956 
1957 static void
1958 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1959 {
1960   char *name;
1961   char c;
1962   symbolS *sym;
1963   asymbol *bfdsym;
1964   elf_symbol_type *elfsym;
1965 
1966   c = get_symbol_name (&name);
1967   if (!*name)
1968     as_bad (_("Missing symbol name in directive"));
1969   sym = symbol_find_or_make (name);
1970   restore_line_pointer (c);
1971   demand_empty_rest_of_line ();
1972   bfdsym = symbol_get_bfdsym (sym);
1973   elfsym = elf_symbol_from (bfdsym);
1974   gas_assert (elfsym);
1975   elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1976 }
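
/* For example:

	.variant_pcs	sve_callee

   marks sve_callee with STO_AARCH64_VARIANT_PCS in its st_other field,
   telling the static and dynamic linkers that the symbol may follow a
   variant procedure call standard (e.g. the SVE vector PCS).  */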
1977 #endif /* OBJ_ELF */
1978 
1979 /* Output a 32-bit word, but mark as an instruction.  */
1980 
1981 static void
1982 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1983 {
1984   expressionS exp;
1985   unsigned n = 0;
1986 
1987 #ifdef md_flush_pending_output
1988   md_flush_pending_output ();
1989 #endif
1990 
1991   if (is_it_end_of_statement ())
1992     {
1993       demand_empty_rest_of_line ();
1994       return;
1995     }
1996 
1997   /* Sections are assumed to start aligned.  In an executable section, there is no
1998      MAP_DATA symbol pending. So we only align the address during
1999      MAP_DATA --> MAP_INSN transition.
2000      For other sections, this is not guaranteed.  */
2001   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2002   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2003     frag_align_code (2, 0);
2004 
2005 #ifdef OBJ_ELF
2006   mapping_state (MAP_INSN);
2007 #endif
2008 
2009   do
2010     {
2011       expression (&exp);
2012       if (exp.X_op != O_constant)
2013 	{
2014 	  as_bad (_("constant expression required"));
2015 	  ignore_rest_of_line ();
2016 	  return;
2017 	}
2018 
2019       if (target_big_endian)
2020 	{
2021 	  unsigned int val = exp.X_add_number;
2022 	  exp.X_add_number = SWAP_32 (val);
2023 	}
2024       emit_expr (&exp, INSN_SIZE);
2025       ++n;
2026     }
2027   while (*input_line_pointer++ == ',');
2028 
2029   dwarf2_emit_insn (n * INSN_SIZE);
2030 
2031   /* Put terminator back into stream.  */
2032   input_line_pointer--;
2033   demand_empty_rest_of_line ();
2034 }
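
/* For example:

	.inst	0xd503201f		// encoding of NOP
	.inst	0xd65f03c0		// encoding of RET

   Each value is emitted as a 4-byte instruction word (byte-swapped when
   assembling big-endian), the mapping state becomes MAP_INSN (for ELF) and
   a DWARF line-table entry is generated, unlike a plain ".word".  */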
2035 
2036 static void
2037 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2038 {
2039   demand_empty_rest_of_line ();
2040   struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2041   fde->pauth_key = AARCH64_PAUTH_KEY_B;
2042 }
2043 
2044 #ifdef OBJ_ELF
2045 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */
2046 
2047 static void
2048 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2049 {
2050   expressionS exp;
2051 
2052   expression (&exp);
2053   frag_grow (4);
2054   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2055 		   BFD_RELOC_AARCH64_TLSDESC_ADD);
2056 
2057   demand_empty_rest_of_line ();
2058 }
2059 
2060 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
2061 
2062 static void
2063 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2064 {
2065   expressionS exp;
2066 
2067   /* Since we're just labelling the code, there's no need to define a
2068      mapping symbol.  */
2069   expression (&exp);
2070   /* Make sure there is enough room in this frag for the following
2071      blr.  This trick only works if the blr follows immediately after
2072      the .tlsdesc directive.  */
2073   frag_grow (4);
2074   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2075 		   BFD_RELOC_AARCH64_TLSDESC_CALL);
2076 
2077   demand_empty_rest_of_line ();
2078 }
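
/* A typical general-dynamic TLS descriptor sequence using the directive
   above might look like:

	adrp	x0, :tlsdesc:var
	ldr	x1, [x0, #:tlsdesc_lo12:var]
	add	x0, x0, #:tlsdesc_lo12:var
	.tlsdesccall var
	blr	x1			// gets BFD_RELOC_AARCH64_TLSDESC_CALL

   so that the linker can recognize and, if possible, relax the whole
   sequence.  */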
2079 
2080 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */
2081 
2082 static void
2083 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2084 {
2085   expressionS exp;
2086 
2087   expression (&exp);
2088   frag_grow (4);
2089   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2090 		   BFD_RELOC_AARCH64_TLSDESC_LDR);
2091 
2092   demand_empty_rest_of_line ();
2093 }
2094 #endif	/* OBJ_ELF */
2095 
2096 static void s_aarch64_arch (int);
2097 static void s_aarch64_cpu (int);
2098 static void s_aarch64_arch_extension (int);
2099 
2100 /* This table describes all the machine specific pseudo-ops the assembler
2101    has to support.  The fields are:
2102      pseudo-op name without dot
2103      function to call to execute this pseudo-op
2104      Integer arg to pass to the function.  */
2105 
2106 const pseudo_typeS md_pseudo_table[] = {
2107   /* Never called because '.req' does not start a line.  */
2108   {"req", s_req, 0},
2109   {"unreq", s_unreq, 0},
2110   {"bss", s_bss, 0},
2111   {"even", s_even, 0},
2112   {"ltorg", s_ltorg, 0},
2113   {"pool", s_ltorg, 0},
2114   {"cpu", s_aarch64_cpu, 0},
2115   {"arch", s_aarch64_arch, 0},
2116   {"arch_extension", s_aarch64_arch_extension, 0},
2117   {"inst", s_aarch64_inst, 0},
2118   {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2119 #ifdef OBJ_ELF
2120   {"tlsdescadd", s_tlsdescadd, 0},
2121   {"tlsdesccall", s_tlsdesccall, 0},
2122   {"tlsdescldr", s_tlsdescldr, 0},
2123   {"word", s_aarch64_elf_cons, 4},
2124   {"long", s_aarch64_elf_cons, 4},
2125   {"xword", s_aarch64_elf_cons, 8},
2126   {"dword", s_aarch64_elf_cons, 8},
2127   {"variant_pcs", s_variant_pcs, 0},
2128 #endif
2129   {"float16", float_cons, 'h'},
2130   {"bfloat16", float_cons, 'b'},
2131   {0, 0, 0}
2132 };
2133 
2134 
2135 /* Check whether STR points to a register name followed by a comma or the
2136    end of line; REG_TYPE indicates which register types are checked
2137    against.  Return TRUE if STR is such a register name; otherwise return
2138    FALSE.  The function does not intend to produce any diagnostics, but since
2139    the register parser aarch64_reg_parse, which is called by this function,
2140    does produce diagnostics, we call clear_error to clear any diagnostics
2141    that may be generated by aarch64_reg_parse.
2142    Also, the function returns FALSE directly if there is any user error
2143    present at the function entry.  This prevents the existing diagnostics
2144    state from being spoiled.
2145    The function currently serves parse_constant_immediate and
2146    parse_big_immediate only.  */
2147 static bool
2148 reg_name_p (char *str, aarch64_reg_type reg_type)
2149 {
2150   int reg;
2151 
2152   /* Prevent the diagnostics state from being spoiled.  */
2153   if (error_p ())
2154     return false;
2155 
2156   reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2157 
2158   /* Clear the parsing error that may be set by the reg parser.  */
2159   clear_error ();
2160 
2161   if (reg == PARSE_FAIL)
2162     return false;
2163 
2164   skip_whitespace (str);
2165   if (*str == ',' || is_end_of_line[(unsigned char) *str])
2166     return true;
2167 
2168   return false;
2169 }
2170 
2171 /* Parser functions used exclusively in instruction operands.  */
2172 
2173 /* Parse an immediate expression which may not be constant.
2174 
2175    To prevent the expression parser from pushing a register name
2176    into the symbol table as an undefined symbol, firstly a check is
2177    done to find out whether STR is a register of type REG_TYPE followed
2178    by a comma or the end of line.  Return FALSE if STR is such a string.  */
2179 
2180 static bool
2181 parse_immediate_expression (char **str, expressionS *exp,
2182 			    aarch64_reg_type reg_type)
2183 {
2184   if (reg_name_p (*str, reg_type))
2185     {
2186       set_recoverable_error (_("immediate operand required"));
2187       return false;
2188     }
2189 
2190   aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2191 			  NORMAL_RESOLUTION);
2192 
2193   if (exp->X_op == O_absent)
2194     {
2195       set_fatal_syntax_error (_("missing immediate expression"));
2196       return false;
2197     }
2198 
2199   return true;
2200 }
2201 
2202 /* Constant immediate-value read function for use in insn parsing.
2203    STR points to the beginning of the immediate (with the optional
2204    leading #); *VAL receives the value.  REG_TYPE says which register
2205    names should be treated as registers rather than as symbolic immediates.
2206 
2207    Return TRUE on success; otherwise return FALSE.  */
2208 
2209 static bool
2210 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2211 {
2212   expressionS exp;
2213 
2214   if (! parse_immediate_expression (str, &exp, reg_type))
2215     return false;
2216 
2217   if (exp.X_op != O_constant)
2218     {
2219       set_syntax_error (_("constant expression required"));
2220       return false;
2221     }
2222 
2223   *val = exp.X_add_number;
2224   return true;
2225 }
2226 
2227 static uint32_t
2228 encode_imm_float_bits (uint32_t imm)
2229 {
2230   return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
2231     | ((imm >> (31 - 7)) & 0x80);	/* b[31]    -> b[7]   */
2232 }
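
/* For example, 1.0f is 0x3f800000, so encode_imm_float_bits (0x3f800000)
   yields the imm8 value 0x70 (sign 0, low exponent bits 111, mantissa 0000);
   -1.0f (0xbf800000) yields 0xf0, and 2.0f (0x40000000) yields 0x00.  */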
2233 
2234 /* Return TRUE if the single-precision floating-point value encoded in IMM
2235    can be expressed in the AArch64 8-bit signed floating-point format with
2236    3-bit exponent and normalized 4 bits of precision; in other words, the
2237    floating-point value must be expressable as
2238    floating-point value must be expressible as
2239      (+/-) n / 16 * power (2, r)
2240    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2241 static bool
2242 aarch64_imm_float_p (uint32_t imm)
2243 {
2244   /* If a single-precision floating-point value has the following bit
2245      pattern, it can be expressed in the AArch64 8-bit floating-point
2246      format:
2247 
2248      3 32222222 2221111111111
2249      1 09876543 21098765432109876543210
2250      n Eeeeeexx xxxx0000000000000000000
2251 
2252      where n, e and each x are either 0 or 1 independently, with
2253      E == ~ e.  */
2254 
2255   uint32_t pattern;
2256 
2257   /* Prepare the pattern for 'Eeeeee'.  */
2258   if (((imm >> 30) & 0x1) == 0)
2259     pattern = 0x3e000000;
2260   else
2261     pattern = 0x40000000;
2262 
2263   return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
2264     && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
2265 }
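
/* Worked examples of the test above: 0.125f (0x3e000000) and 31.0f
   (0x41f80000) both pass, matching the minimum and maximum magnitudes of
   (+/-) n / 16 * 2^r with 16 <= n <= 31 and -3 <= r <= 4, whereas 0.1f
   (0x3dcccccd) fails because its low 19 mantissa bits are non-zero.  */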
2266 
2267 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2268    as an IEEE float without any loss of precision.  Store the value in
2269    *FPWORD if so.  */
2270 
2271 static bool
2272 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2273 {
2274   /* If a double-precision floating-point value has the following bit
2275      pattern, it can be expressed in a float:
2276 
2277      6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2278      3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2279      n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2280 
2281        ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
2282 	 if Eeee_eeee != 1111_1111
2283 
2284      where n, e, s and S are either 0 or 1 independently and where ~ is the
2285      inverse of E.  */
2286 
2287   uint32_t pattern;
2288   uint32_t high32 = imm >> 32;
2289   uint32_t low32 = imm;
2290 
2291   /* Lower 29 bits need to be 0s.  */
2292   if ((imm & 0x1fffffff) != 0)
2293     return false;
2294 
2295   /* Prepare the pattern for 'Eeeeeeeee'.  */
2296   if (((high32 >> 30) & 0x1) == 0)
2297     pattern = 0x38000000;
2298   else
2299     pattern = 0x40000000;
2300 
2301   /* Check E~~~.  */
2302   if ((high32 & 0x78000000) != pattern)
2303     return false;
2304 
2305   /* Check Eeee_eeee != 1111_1111.  */
2306   if ((high32 & 0x7ff00000) == 0x47f00000)
2307     return false;
2308 
2309   *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
2310 	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
2311 	     | (low32 >> 29));			/* 3 S bits.  */
2312   return true;
2313 }
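
/* Worked examples of the conversion above: 2.0 as a double is
   0x4000000000000000 and narrows to the single-precision word 0x40000000,
   while 0.1 (0x3fb999999999999a) is rejected because its low 29 mantissa
   bits are non-zero and would be lost.  */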
2314 
2315 /* Return true if we should treat OPERAND as a double-precision
2316    floating-point operand rather than a single-precision one.  */
2317 static bool
2318 double_precision_operand_p (const aarch64_opnd_info *operand)
2319 {
2320   /* Check for unsuffixed SVE registers, which are allowed
2321      for LDR and STR but not in instructions that require an
2322      immediate.  We get better error messages if we arbitrarily
2323      pick one size, parse the immediate normally, and then
2324      report the match failure in the normal way.  */
2325   return (operand->qualifier == AARCH64_OPND_QLF_NIL
2326 	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2327 }
2328 
2329 /* Parse a floating-point immediate.  Return TRUE on success and return the
2330    value in *IMMED in the format of IEEE754 single-precision encoding.
2331    *CCP points to the start of the string; DP_P is TRUE when the immediate
2332    is expected to be in double-precision (N.B. this only matters when
2333    hexadecimal representation is involved).  REG_TYPE says which register
2334    names should be treated as registers rather than as symbolic immediates.
2335 
2336    This routine accepts any IEEE float; it is up to the callers to reject
2337    invalid ones.  */
2338 
2339 static bool
2340 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2341 			 aarch64_reg_type reg_type)
2342 {
2343   char *str = *ccp;
2344   char *fpnum;
2345   LITTLENUM_TYPE words[MAX_LITTLENUMS];
2346   int64_t val = 0;
2347   unsigned fpword = 0;
2348   bool hex_p = false;
2349 
2350   skip_past_char (&str, '#');
2351 
2352   fpnum = str;
2353   skip_whitespace (fpnum);
2354 
2355   if (startswith (fpnum, "0x"))
2356     {
2357       /* Support the hexadecimal representation of the IEEE754 encoding.
2358 	 Double-precision is expected when DP_P is TRUE, otherwise the
2359 	 representation should be in single-precision.  */
2360       if (! parse_constant_immediate (&str, &val, reg_type))
2361 	goto invalid_fp;
2362 
2363       if (dp_p)
2364 	{
2365 	  if (!can_convert_double_to_float (val, &fpword))
2366 	    goto invalid_fp;
2367 	}
2368       else if ((uint64_t) val > 0xffffffff)
2369 	goto invalid_fp;
2370       else
2371 	fpword = val;
2372 
2373       hex_p = true;
2374     }
2375   else if (reg_name_p (str, reg_type))
2376     {
2377       set_recoverable_error (_("immediate operand required"));
2378       return false;
2379     }
2380 
2381   if (! hex_p)
2382     {
2383       int i;
2384 
2385       if ((str = atof_ieee (str, 's', words)) == NULL)
2386 	goto invalid_fp;
2387 
2388       /* Our FP word must be 32 bits (single-precision FP).  */
2389       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2390 	{
2391 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
2392 	  fpword |= words[i];
2393 	}
2394     }
2395 
2396   *immed = fpword;
2397   *ccp = str;
2398   return true;
2399 
2400  invalid_fp:
2401   set_fatal_syntax_error (_("invalid floating-point constant"));
2402   return false;
2403 }
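
/* So, for a single-precision operand, both "#1.0" and the raw IEEE754
   encoding "#0x3f800000" are accepted here and produce *IMMED == 0x3f800000;
   for a double-precision operand, a hexadecimal immediate such as
   "#0x3ff0000000000000" (1.0) is first narrowed through
   can_convert_double_to_float.  */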
2404 
2405 /* Less-generic immediate-value read function with the possibility of loading
2406    a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2407    instructions.
2408 
2409    To prevent the expression parser from pushing a register name into the
2410    symbol table as an undefined symbol, a check is firstly done to find
2411    out whether STR is a register of type REG_TYPE followed by a comma or
2412    the end of line.  Return FALSE if STR is such a register.  */
2413 
2414 static bool
2415 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2416 {
2417   char *ptr = *str;
2418 
2419   if (reg_name_p (ptr, reg_type))
2420     {
2421       set_syntax_error (_("immediate operand required"));
2422       return false;
2423     }
2424 
2425   aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2426 			  NORMAL_RESOLUTION);
2427 
2428   if (inst.reloc.exp.X_op == O_constant)
2429     *imm = inst.reloc.exp.X_add_number;
2430 
2431   *str = ptr;
2432 
2433   return true;
2434 }
2435 
2436 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2437    If NEED_LIBOPCODES_P is non-zero, the fixup will need
2438    assistance from libopcodes.  */
2439 
2440 static inline void
2441 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2442 				const aarch64_opnd_info *operand,
2443 				int need_libopcodes_p)
2444 {
2445   reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2446   reloc->opnd = operand->type;
2447   if (need_libopcodes_p)
2448     reloc->need_libopcodes_p = 1;
2449 };
2450 
2451 /* Return TRUE if the instruction needs to be fixed up later internally by
2452    the GAS; otherwise return FALSE.  */
2453 
2454 static inline bool
aarch64_gas_internal_fixup_p(void)2455 aarch64_gas_internal_fixup_p (void)
2456 {
2457   return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2458 }
2459 
2460 /* Assign the immediate value to the relevant field in *OPERAND if
2461    RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2462    needs an internal fixup in a later stage.
2463    ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2464    IMM.VALUE that may get assigned with the constant.  */
2465 static inline void
2466 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2467 				    aarch64_opnd_info *operand,
2468 				    int addr_off_p,
2469 				    int need_libopcodes_p,
2470 				    int skip_p)
2471 {
2472   if (reloc->exp.X_op == O_constant)
2473     {
2474       if (addr_off_p)
2475 	operand->addr.offset.imm = reloc->exp.X_add_number;
2476       else
2477 	operand->imm.value = reloc->exp.X_add_number;
2478       reloc->type = BFD_RELOC_UNUSED;
2479     }
2480   else
2481     {
2482       aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2483       /* Tell libopcodes to ignore this operand or not.  This is helpful
2484 	 when one of the operands needs to be fixed up later but we need
2485 	 libopcodes to check the other operands.  */
2486       operand->skip = skip_p;
2487     }
2488 }
2489 
2490 /* Relocation modifiers.  Each entry in the table contains the textual
2491    name for the relocation which may be placed before a symbol used as
2492    a load/store offset, or add immediate. It must be surrounded by a
2493    leading and trailing colon, for example:
2494 
2495 	ldr	x0, [x1, #:rello:varsym]
2496 	add	x0, x1, #:rello:varsym  */
2497 
2498 struct reloc_table_entry
2499 {
2500   const char *name;
2501   int pc_rel;
2502   bfd_reloc_code_real_type adr_type;
2503   bfd_reloc_code_real_type adrp_type;
2504   bfd_reloc_code_real_type movw_type;
2505   bfd_reloc_code_real_type add_type;
2506   bfd_reloc_code_real_type ldst_type;
2507   bfd_reloc_code_real_type ld_literal_type;
2508 };
2509 
2510 static struct reloc_table_entry reloc_table[] =
2511 {
2512   /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2513   {"lo12", 0,
2514    0,				/* adr_type */
2515    0,
2516    0,
2517    BFD_RELOC_AARCH64_ADD_LO12,
2518    BFD_RELOC_AARCH64_LDST_LO12,
2519    0},
2520 
2521   /* Higher 21 bits of pc-relative page offset: ADRP */
2522   {"pg_hi21", 1,
2523    0,				/* adr_type */
2524    BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2525    0,
2526    0,
2527    0,
2528    0},
2529 
2530   /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2531   {"pg_hi21_nc", 1,
2532    0,				/* adr_type */
2533    BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2534    0,
2535    0,
2536    0,
2537    0},
2538 
2539   /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2540   {"abs_g0", 0,
2541    0,				/* adr_type */
2542    0,
2543    BFD_RELOC_AARCH64_MOVW_G0,
2544    0,
2545    0,
2546    0},
2547 
2548   /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2549   {"abs_g0_s", 0,
2550    0,				/* adr_type */
2551    0,
2552    BFD_RELOC_AARCH64_MOVW_G0_S,
2553    0,
2554    0,
2555    0},
2556 
2557   /* Less significant bits 0-15 of address/value: MOVK, no check */
2558   {"abs_g0_nc", 0,
2559    0,				/* adr_type */
2560    0,
2561    BFD_RELOC_AARCH64_MOVW_G0_NC,
2562    0,
2563    0,
2564    0},
2565 
2566   /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2567   {"abs_g1", 0,
2568    0,				/* adr_type */
2569    0,
2570    BFD_RELOC_AARCH64_MOVW_G1,
2571    0,
2572    0,
2573    0},
2574 
2575   /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2576   {"abs_g1_s", 0,
2577    0,				/* adr_type */
2578    0,
2579    BFD_RELOC_AARCH64_MOVW_G1_S,
2580    0,
2581    0,
2582    0},
2583 
2584   /* Less significant bits 16-31 of address/value: MOVK, no check */
2585   {"abs_g1_nc", 0,
2586    0,				/* adr_type */
2587    0,
2588    BFD_RELOC_AARCH64_MOVW_G1_NC,
2589    0,
2590    0,
2591    0},
2592 
2593   /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2594   {"abs_g2", 0,
2595    0,				/* adr_type */
2596    0,
2597    BFD_RELOC_AARCH64_MOVW_G2,
2598    0,
2599    0,
2600    0},
2601 
2602   /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2603   {"abs_g2_s", 0,
2604    0,				/* adr_type */
2605    0,
2606    BFD_RELOC_AARCH64_MOVW_G2_S,
2607    0,
2608    0,
2609    0},
2610 
2611   /* Less significant bits 32-47 of address/value: MOVK, no check */
2612   {"abs_g2_nc", 0,
2613    0,				/* adr_type */
2614    0,
2615    BFD_RELOC_AARCH64_MOVW_G2_NC,
2616    0,
2617    0,
2618    0},
2619 
2620   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2621   {"abs_g3", 0,
2622    0,				/* adr_type */
2623    0,
2624    BFD_RELOC_AARCH64_MOVW_G3,
2625    0,
2626    0,
2627    0},
2628 
2629   /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2630   {"prel_g0", 1,
2631    0,				/* adr_type */
2632    0,
2633    BFD_RELOC_AARCH64_MOVW_PREL_G0,
2634    0,
2635    0,
2636    0},
2637 
2638   /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2639   {"prel_g0_nc", 1,
2640    0,				/* adr_type */
2641    0,
2642    BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2643    0,
2644    0,
2645    0},
2646 
2647   /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2648   {"prel_g1", 1,
2649    0,				/* adr_type */
2650    0,
2651    BFD_RELOC_AARCH64_MOVW_PREL_G1,
2652    0,
2653    0,
2654    0},
2655 
2656   /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2657   {"prel_g1_nc", 1,
2658    0,				/* adr_type */
2659    0,
2660    BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2661    0,
2662    0,
2663    0},
2664 
2665   /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2666   {"prel_g2", 1,
2667    0,				/* adr_type */
2668    0,
2669    BFD_RELOC_AARCH64_MOVW_PREL_G2,
2670    0,
2671    0,
2672    0},
2673 
2674   /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2675   {"prel_g2_nc", 1,
2676    0,				/* adr_type */
2677    0,
2678    BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2679    0,
2680    0,
2681    0},
2682 
2683   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2684   {"prel_g3", 1,
2685    0,				/* adr_type */
2686    0,
2687    BFD_RELOC_AARCH64_MOVW_PREL_G3,
2688    0,
2689    0,
2690    0},
2691 
2692   /* Get to the page containing GOT entry for a symbol.  */
2693   {"got", 1,
2694    0,				/* adr_type */
2695    BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2696    0,
2697    0,
2698    0,
2699    BFD_RELOC_AARCH64_GOT_LD_PREL19},
2700 
2701   /* 12 bit offset into the page containing GOT entry for that symbol.  */
2702   {"got_lo12", 0,
2703    0,				/* adr_type */
2704    0,
2705    0,
2706    0,
2707    BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2708    0},
2709 
2710   /* 0-15 bits of address/value: MOVK, no check.  */
2711   {"gotoff_g0_nc", 0,
2712    0,				/* adr_type */
2713    0,
2714    BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2715    0,
2716    0,
2717    0},
2718 
2719   /* Most significant bits 16-31 of address/value: MOVZ.  */
2720   {"gotoff_g1", 0,
2721    0,				/* adr_type */
2722    0,
2723    BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2724    0,
2725    0,
2726    0},
2727 
2728   /* 15 bit offset into the page containing GOT entry for that symbol.  */
2729   {"gotoff_lo15", 0,
2730    0,				/* adr_type */
2731    0,
2732    0,
2733    0,
2734    BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2735    0},
2736 
2737   /* Get to the page containing GOT TLS entry for a symbol */
2738   {"gottprel_g0_nc", 0,
2739    0,				/* adr_type */
2740    0,
2741    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2742    0,
2743    0,
2744    0},
2745 
2746   /* Get to the page containing GOT TLS entry for a symbol */
2747   {"gottprel_g1", 0,
2748    0,				/* adr_type */
2749    0,
2750    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2751    0,
2752    0,
2753    0},
2754 
2755   /* Get to the page containing GOT TLS entry for a symbol */
2756   {"tlsgd", 0,
2757    BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2758    BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2759    0,
2760    0,
2761    0,
2762    0},
2763 
2764   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2765   {"tlsgd_lo12", 0,
2766    0,				/* adr_type */
2767    0,
2768    0,
2769    BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2770    0,
2771    0},
2772 
2773   /* Lower 16 bits of address/value: MOVK.  */
2774   {"tlsgd_g0_nc", 0,
2775    0,				/* adr_type */
2776    0,
2777    BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2778    0,
2779    0,
2780    0},
2781 
2782   /* Most significant bits 16-31 of address/value: MOVZ.  */
2783   {"tlsgd_g1", 0,
2784    0,				/* adr_type */
2785    0,
2786    BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2787    0,
2788    0,
2789    0},
2790 
2791   /* Get to the page containing GOT TLS entry for a symbol */
2792   {"tlsdesc", 0,
2793    BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2794    BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2795    0,
2796    0,
2797    0,
2798    BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2799 
2800   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2801   {"tlsdesc_lo12", 0,
2802    0,				/* adr_type */
2803    0,
2804    0,
2805    BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2806    BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2807    0},
2808 
2809   /* Get to the page containing GOT TLS entry for a symbol.
2810      As with GD, we allocate two consecutive GOT slots,
2811      one for the module index and one for the module offset; the only
2812      difference from GD is that the module offset should be initialized
2813      to zero, without any outstanding runtime relocation. */
2814   {"tlsldm", 0,
2815    BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2816    BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2817    0,
2818    0,
2819    0,
2820    0},
2821 
2822   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2823   {"tlsldm_lo12_nc", 0,
2824    0,				/* adr_type */
2825    0,
2826    0,
2827    BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2828    0,
2829    0},
2830 
2831   /* 12 bit offset into the module TLS base address.  */
2832   {"dtprel_lo12", 0,
2833    0,				/* adr_type */
2834    0,
2835    0,
2836    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2837    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2838    0},
2839 
2840   /* Same as dtprel_lo12, no overflow check.  */
2841   {"dtprel_lo12_nc", 0,
2842    0,				/* adr_type */
2843    0,
2844    0,
2845    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2846    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2847    0},
2848 
2849   /* bits[23:12] of offset to the module TLS base address.  */
2850   {"dtprel_hi12", 0,
2851    0,				/* adr_type */
2852    0,
2853    0,
2854    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2855    0,
2856    0},
2857 
2858   /* bits[15:0] of offset to the module TLS base address.  */
2859   {"dtprel_g0", 0,
2860    0,				/* adr_type */
2861    0,
2862    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2863    0,
2864    0,
2865    0},
2866 
2867   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
2868   {"dtprel_g0_nc", 0,
2869    0,				/* adr_type */
2870    0,
2871    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2872    0,
2873    0,
2874    0},
2875 
2876   /* bits[31:16] of offset to the module TLS base address.  */
2877   {"dtprel_g1", 0,
2878    0,				/* adr_type */
2879    0,
2880    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2881    0,
2882    0,
2883    0},
2884 
2885   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
2886   {"dtprel_g1_nc", 0,
2887    0,				/* adr_type */
2888    0,
2889    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2890    0,
2891    0,
2892    0},
2893 
2894   /* bits[47:32] of offset to the module TLS base address.  */
2895   {"dtprel_g2", 0,
2896    0,				/* adr_type */
2897    0,
2898    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2899    0,
2900    0,
2901    0},
2902 
2903   /* Lower 16 bit offset into GOT entry for a symbol */
2904   {"tlsdesc_off_g0_nc", 0,
2905    0,				/* adr_type */
2906    0,
2907    BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2908    0,
2909    0,
2910    0},
2911 
2912   /* Higher 16 bit offset into GOT entry for a symbol */
2913   {"tlsdesc_off_g1", 0,
2914    0,				/* adr_type */
2915    0,
2916    BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2917    0,
2918    0,
2919    0},
2920 
2921   /* Get to the page containing GOT TLS entry for a symbol */
2922   {"gottprel", 0,
2923    0,				/* adr_type */
2924    BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2925    0,
2926    0,
2927    0,
2928    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2929 
2930   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2931   {"gottprel_lo12", 0,
2932    0,				/* adr_type */
2933    0,
2934    0,
2935    0,
2936    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2937    0},
2938 
2939   /* Get tp offset for a symbol.  */
2940   {"tprel", 0,
2941    0,				/* adr_type */
2942    0,
2943    0,
2944    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2945    0,
2946    0},
2947 
2948   /* Get tp offset for a symbol.  */
2949   {"tprel_lo12", 0,
2950    0,				/* adr_type */
2951    0,
2952    0,
2953    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2954    BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2955    0},
2956 
2957   /* Get tp offset for a symbol.  */
2958   {"tprel_hi12", 0,
2959    0,				/* adr_type */
2960    0,
2961    0,
2962    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2963    0,
2964    0},
2965 
2966   /* Get tp offset for a symbol.  */
2967   {"tprel_lo12_nc", 0,
2968    0,				/* adr_type */
2969    0,
2970    0,
2971    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2972    BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2973    0},
2974 
2975   /* Most significant bits 32-47 of address/value: MOVZ.  */
2976   {"tprel_g2", 0,
2977    0,				/* adr_type */
2978    0,
2979    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2980    0,
2981    0,
2982    0},
2983 
2984   /* Most significant bits 16-31 of address/value: MOVZ.  */
2985   {"tprel_g1", 0,
2986    0,				/* adr_type */
2987    0,
2988    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2989    0,
2990    0,
2991    0},
2992 
2993   /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
2994   {"tprel_g1_nc", 0,
2995    0,				/* adr_type */
2996    0,
2997    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2998    0,
2999    0,
3000    0},
3001 
3002   /* Most significant bits 0-15 of address/value: MOVZ.  */
3003   {"tprel_g0", 0,
3004    0,				/* adr_type */
3005    0,
3006    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3007    0,
3008    0,
3009    0},
3010 
3011   /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
3012   {"tprel_g0_nc", 0,
3013    0,				/* adr_type */
3014    0,
3015    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3016    0,
3017    0,
3018    0},
3019 
3020   /* 15-bit offset from GOT entry to base address of GOT table.  */
3021   {"gotpage_lo15", 0,
3022    0,
3023    0,
3024    0,
3025    0,
3026    BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3027    0},
3028 
3029   /* 14-bit offset from GOT entry to base address of GOT table.  */
3030   {"gotpage_lo14", 0,
3031    0,
3032    0,
3033    0,
3034    0,
3035    BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3036    0},
3037 };
3038 
3039 /* Given the address of a pointer pointing to the textual name of a
3040    relocation as may appear in assembler source, attempt to find its
3041    details in reloc_table.  The pointer will be updated to the character
3042    after the trailing colon.  On failure, NULL will be returned;
3043    otherwise return the reloc_table_entry.  */
3044 
3045 static struct reloc_table_entry *
3046 find_reloc_table_entry (char **str)
3047 {
3048   unsigned int i;
3049   for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3050     {
3051       int length = strlen (reloc_table[i].name);
3052 
3053       if (strncasecmp (reloc_table[i].name, *str, length) == 0
3054 	  && (*str)[length] == ':')
3055 	{
3056 	  *str += (length + 1);
3057 	  return &reloc_table[i];
3058 	}
3059     }
3060 
3061   return NULL;
3062 }
3063 
3064 /* Returns 0 if the relocation should never be forced,
3065    1 if the relocation must be forced, and -1 if either
3066    result is OK.  */
3067 
3068 static signed int
3069 aarch64_force_reloc (unsigned int type)
3070 {
3071   switch (type)
3072     {
3073     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
3074       /* Perform these "immediate" internal relocations
3075          even if the symbol is extern or weak.  */
3076       return 0;
3077 
3078     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
3079     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
3080     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
3081       /* Pseudo relocs that need to be fixed up according to
3082 	 ilp32_p.  */
3083       return 1;
3084 
3085     case BFD_RELOC_AARCH64_ADD_LO12:
3086     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3087     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
3088     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3089     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3090     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3091     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
3092     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
3093     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
3094     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3095     case BFD_RELOC_AARCH64_LDST128_LO12:
3096     case BFD_RELOC_AARCH64_LDST16_LO12:
3097     case BFD_RELOC_AARCH64_LDST32_LO12:
3098     case BFD_RELOC_AARCH64_LDST64_LO12:
3099     case BFD_RELOC_AARCH64_LDST8_LO12:
3100     case BFD_RELOC_AARCH64_LDST_LO12:
3101     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
3102     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3103     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3104     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3105     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
3106     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3107     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
3108     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
3109     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3110     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3111     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3112     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
3113     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
3114     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3115     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3116     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3117     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3118     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
3119     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
3120     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
3121     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
3122     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
3123     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
3124     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
3125     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
3126     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
3127     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
3128     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
3129     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
3130     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
3131     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
3132     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
3133     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
3134     case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
3135     case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
3136     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
3137     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
3138     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
3139     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
3140     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
3141     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
3142     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
3143     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
3144     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
3145     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
3146     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
3147     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
3148     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
3149     case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
3150     case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
3151     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3152     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3153     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3154     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3155     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3156     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3157     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3158     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3159       /* Always leave these relocations for the linker.  */
3160       return 1;
3161 
3162     default:
3163       return -1;
3164     }
3165 }
3166 
3167 int
3168 aarch64_force_relocation (struct fix *fixp)
3169 {
3170   int res = aarch64_force_reloc (fixp->fx_r_type);
3171 
3172   if (res == -1)
3173     return generic_force_reloc (fixp);
3174   return res;
3175 }
3176 
3177 /* Mode argument to parse_shift and parser_shifter_operand.  */
3178 enum parse_shift_mode
3179 {
3180   SHIFTED_NONE,			/* no shifter allowed  */
3181   SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3182 				   "#imm{,lsl #n}"  */
3183   SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
3184 				   "#imm"  */
3185   SHIFTED_LSL,			/* bare "lsl #n"  */
3186   SHIFTED_MUL,			/* bare "mul #n"  */
3187   SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
3188   SHIFTED_MUL_VL,		/* "mul vl"  */
3189   SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
3190 };
3191 
3192 /* Parse a <shift> operator on an AArch64 data processing instruction.
3193    Return TRUE on success; otherwise return FALSE.  */
3194 static bool
3195 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3196 {
3197   const struct aarch64_name_value_pair *shift_op;
3198   enum aarch64_modifier_kind kind;
3199   expressionS exp;
3200   int exp_has_prefix;
3201   char *s = *str;
3202   char *p = s;
3203 
3204   for (p = *str; ISALPHA (*p); p++)
3205     ;
3206 
3207   if (p == *str)
3208     {
3209       set_syntax_error (_("shift expression expected"));
3210       return false;
3211     }
3212 
3213   shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3214 
3215   if (shift_op == NULL)
3216     {
3217       set_syntax_error (_("shift operator expected"));
3218       return false;
3219     }
3220 
3221   kind = aarch64_get_operand_modifier (shift_op);
3222 
3223   if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3224     {
3225       set_syntax_error (_("invalid use of 'MSL'"));
3226       return false;
3227     }
3228 
3229   if (kind == AARCH64_MOD_MUL
3230       && mode != SHIFTED_MUL
3231       && mode != SHIFTED_MUL_VL)
3232     {
3233       set_syntax_error (_("invalid use of 'MUL'"));
3234       return false;
3235     }
3236 
3237   switch (mode)
3238     {
3239     case SHIFTED_LOGIC_IMM:
3240       if (aarch64_extend_operator_p (kind))
3241 	{
3242 	  set_syntax_error (_("extending shift is not permitted"));
3243 	  return false;
3244 	}
3245       break;
3246 
3247     case SHIFTED_ARITH_IMM:
3248       if (kind == AARCH64_MOD_ROR)
3249 	{
3250 	  set_syntax_error (_("'ROR' shift is not permitted"));
3251 	  return false;
3252 	}
3253       break;
3254 
3255     case SHIFTED_LSL:
3256       if (kind != AARCH64_MOD_LSL)
3257 	{
3258 	  set_syntax_error (_("only 'LSL' shift is permitted"));
3259 	  return false;
3260 	}
3261       break;
3262 
3263     case SHIFTED_MUL:
3264       if (kind != AARCH64_MOD_MUL)
3265 	{
3266 	  set_syntax_error (_("only 'MUL' is permitted"));
3267 	  return false;
3268 	}
3269       break;
3270 
3271     case SHIFTED_MUL_VL:
3272       /* "MUL VL" consists of two separate tokens.  Require the first
3273 	 token to be "MUL" and look for a following "VL".  */
3274       if (kind == AARCH64_MOD_MUL)
3275 	{
3276 	  skip_whitespace (p);
3277 	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3278 	    {
3279 	      p += 2;
3280 	      kind = AARCH64_MOD_MUL_VL;
3281 	      break;
3282 	    }
3283 	}
3284       set_syntax_error (_("only 'MUL VL' is permitted"));
3285       return false;
3286 
3287     case SHIFTED_REG_OFFSET:
3288       if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3289 	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3290 	{
3291 	  set_fatal_syntax_error
3292 	    (_("invalid shift for the register offset addressing mode"));
3293 	  return false;
3294 	}
3295       break;
3296 
3297     case SHIFTED_LSL_MSL:
3298       if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3299 	{
3300 	  set_syntax_error (_("invalid shift operator"));
3301 	  return false;
3302 	}
3303       break;
3304 
3305     default:
3306       abort ();
3307     }
3308 
3309   /* Whitespace can appear here if the next thing is a bare digit.  */
3310   skip_whitespace (p);
3311 
3312   /* Parse shift amount.  */
3313   exp_has_prefix = 0;
3314   if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3315     exp.X_op = O_absent;
3316   else
3317     {
3318       if (is_immediate_prefix (*p))
3319 	{
3320 	  p++;
3321 	  exp_has_prefix = 1;
3322 	}
3323       (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3324 				     NORMAL_RESOLUTION);
3325     }
3326   if (kind == AARCH64_MOD_MUL_VL)
3327     /* For consistency, give MUL VL the same shift amount as an implicit
3328        MUL #1.  */
3329     operand->shifter.amount = 1;
3330   else if (exp.X_op == O_absent)
3331     {
3332       if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3333 	{
3334 	  set_syntax_error (_("missing shift amount"));
3335 	  return false;
3336 	}
3337       operand->shifter.amount = 0;
3338     }
3339   else if (exp.X_op != O_constant)
3340     {
3341       set_syntax_error (_("constant shift amount required"));
3342       return false;
3343     }
3344   /* For parsing purposes, MUL #n has no inherent range.  The range
3345      depends on the operand and will be checked by operand-specific
3346      routines.  */
3347   else if (kind != AARCH64_MOD_MUL
3348 	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
3349     {
3350       set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3351       return false;
3352     }
3353   else
3354     {
3355       operand->shifter.amount = exp.X_add_number;
3356       operand->shifter.amount_present = 1;
3357     }
3358 
3359   operand->shifter.operator_present = 1;
3360   operand->shifter.kind = kind;
3361 
3362   *str = p;
3363   return true;
3364 }
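
/* Examples of what the parser above produces: "lsl #3" gives
   shifter.kind == AARCH64_MOD_LSL with amount 3 and amount_present set;
   a bare "uxtw" immediately before the closing ']' of a register-offset
   address (SHIFTED_REG_OFFSET) gives AARCH64_MOD_UXTW with an implicit
   amount of 0; and "mul vl" (SHIFTED_MUL_VL, used for SVE addresses) gives
   AARCH64_MOD_MUL_VL with an implicit amount of 1.  */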
3365 
3366 /* Parse a <shifter_operand> for a data processing instruction:
3367 
3368       #<immediate>
3369       #<immediate>, LSL #imm
3370 
3371    Validation of immediate operands is deferred to md_apply_fix.
3372 
3373    Return TRUE on success; otherwise return FALSE.  */
3374 
3375 static bool
3376 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3377 			   enum parse_shift_mode mode)
3378 {
3379   char *p;
3380 
3381   if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3382     return false;
3383 
3384   p = *str;
3385 
3386   /* Accept an immediate expression.  */
3387   if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3388 				REJECT_ABSENT, NORMAL_RESOLUTION))
3389     return false;
3390 
3391   /* Accept optional LSL for arithmetic immediate values.  */
3392   if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3393     if (! parse_shift (&p, operand, SHIFTED_LSL))
3394       return false;
3395 
3396   /* Do not accept any shifter for logical immediate values.  */
3397   if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3398       && parse_shift (&p, operand, mode))
3399     {
3400       set_syntax_error (_("unexpected shift operator"));
3401       return false;
3402     }
3403 
3404   *str = p;
3405   return true;
3406 }
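
/* E.g. for "add x0, x1, #20, lsl #12" (SHIFTED_ARITH_IMM) the code above
   records 20 in inst.reloc.exp and then lets parse_shift consume "lsl #12",
   whereas in SHIFTED_LOGIC_IMM mode (logical immediates such as those of
   AND/ORR) a trailing ", lsl #n" is rejected as an unexpected shift
   operator.  */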
3407 
3408 /* Parse a <shifter_operand> for a data processing instruction:
3409 
3410       <Rm>
3411       <Rm>, <shift>
3412       #<immediate>
3413       #<immediate>, LSL #imm
3414 
3415    where <shift> is handled by parse_shift above, and the last two
3416    cases are handled by the function above.
3417 
3418    Validation of immediate operands is deferred to md_apply_fix.
3419 
3420    Return TRUE on success; otherwise return FALSE.  */
3421 
3422 static bool
3423 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3424 		       enum parse_shift_mode mode)
3425 {
3426   const reg_entry *reg;
3427   aarch64_opnd_qualifier_t qualifier;
3428   enum aarch64_operand_class opd_class
3429     = aarch64_get_operand_class (operand->type);
3430 
3431   reg = aarch64_reg_parse_32_64 (str, &qualifier);
3432   if (reg)
3433     {
3434       if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3435 	{
3436 	  set_syntax_error (_("unexpected register in the immediate operand"));
3437 	  return false;
3438 	}
3439 
3440       if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3441 	{
3442 	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3443 	  return false;
3444 	}
3445 
3446       operand->reg.regno = reg->number;
3447       operand->qualifier = qualifier;
3448 
3449       /* Accept optional shift operation on register.  */
3450       if (! skip_past_comma (str))
3451 	return true;
3452 
3453       if (! parse_shift (str, operand, mode))
3454 	return false;
3455 
3456       return true;
3457     }
3458   else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3459     {
3460       set_syntax_error
3461 	(_("integer register expected in the extended/shifted operand "
3462 	   "register"));
3463       return false;
3464     }
3465 
3466   /* We have a shifted immediate variable.  */
3467   return parse_shifter_operand_imm (str, operand, mode);
3468 }
3469 
3470 /* Return TRUE on success; return FALSE otherwise.  */
3471 
3472 static bool
3473 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3474 			     enum parse_shift_mode mode)
3475 {
3476   char *p = *str;
3477 
3478   /* Determine if we have the sequence of characters #: or just :
3479      coming next.  If we do, then we check for a :rello: relocation
3480      modifier.  If we don't, punt the whole lot to
3481      parse_shifter_operand.  */
3482 
3483   if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3484     {
3485       struct reloc_table_entry *entry;
3486 
3487       if (p[0] == '#')
3488 	p += 2;
3489       else
3490 	p++;
3491       *str = p;
3492 
3493       /* Try to parse a relocation.  Anything else is an error.  */
3494       if (!(entry = find_reloc_table_entry (str)))
3495 	{
3496 	  set_syntax_error (_("unknown relocation modifier"));
3497 	  return false;
3498 	}
3499 
3500       if (entry->add_type == 0)
3501 	{
3502 	  set_syntax_error
3503 	    (_("this relocation modifier is not allowed on this instruction"));
3504 	  return false;
3505 	}
3506 
3507       /* Save str before we decompose it.  */
3508       p = *str;
3509 
3510       /* Next, we parse the expression.  */
3511       if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3512 				    REJECT_ABSENT,
3513 				    aarch64_force_reloc (entry->add_type) == 1))
3514 	return false;
3515 
3516       /* Record the relocation type (use the ADD variant here).  */
3517       inst.reloc.type = entry->add_type;
3518       inst.reloc.pc_rel = entry->pc_rel;
3519 
3520       /* If str is empty, we've reached the end, stop here.  */
3521       if (**str == '\0')
3522 	return true;
3523 
3524       /* Otherwise, we have a shifted reloc modifier, so rewind to
3525          recover the variable name and continue parsing for the shifter.  */
3526       *str = p;
3527       return parse_shifter_operand_imm (str, operand, mode);
3528     }
3529 
3530   return parse_shifter_operand (str, operand, mode);
3531 }
3532 
3533 /* Parse all forms of an address expression.  Information is written
3534    to *OPERAND and/or inst.reloc.
3535 
3536    The A64 instruction set has the following addressing modes:
3537 
3538    Offset
3539      [base]			 // in SIMD ld/st structure
3540      [base{,#0}]		 // in ld/st exclusive
3541      [base{,#imm}]
3542      [base,Xm{,LSL #imm}]
3543      [base,Xm,SXTX {#imm}]
3544      [base,Wm,(S|U)XTW {#imm}]
3545    Pre-indexed
3546      [base]!                    // in ldraa/ldrab exclusive
3547      [base,#imm]!
3548    Post-indexed
3549      [base],#imm
3550      [base],Xm			 // in SIMD ld/st structure
3551    PC-relative (literal)
3552      label
3553    SVE:
3554      [base,#imm,MUL VL]
3555      [base,Zm.D{,LSL #imm}]
3556      [base,Zm.S,(S|U)XTW {#imm}]
3557      [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3558      [Zn.S,#imm]
3559      [Zn.D,#imm]
3560      [Zn.S{, Xm}]
3561      [Zn.S,Zm.S{,LSL #imm}]      // in ADR
3562      [Zn.D,Zm.D{,LSL #imm}]      // in ADR
3563      [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3564 
3565    (As a convenience, the notation "=immediate" is permitted in conjunction
3566    with the pc-relative literal load instructions to automatically place an
3567    immediate value or symbolic address in a nearby literal pool and generate
3568    a hidden label which references it.)
3569 
3570    Upon a successful parsing, the address structure in *OPERAND will be
3571    filled in the following way:
3572 
3573      .base_regno = <base>
3574      .offset.is_reg	// 1 if the offset is a register
3575      .offset.imm = <imm>
3576      .offset.regno = <Rm>
3577 
3578    For different addressing modes defined in the A64 ISA:
3579 
3580    Offset
3581      .pcrel=0; .preind=1; .postind=0; .writeback=0
3582    Pre-indexed
3583      .pcrel=0; .preind=1; .postind=0; .writeback=1
3584    Post-indexed
3585      .pcrel=0; .preind=0; .postind=1; .writeback=1
3586    PC-relative (literal)
3587      .pcrel=1; .preind=1; .postind=0; .writeback=0
3588 
3589    The shift/extension information, if any, will be stored in .shifter.
3590    The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3591    *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3592    corresponding register.
3593 
3594    BASE_TYPE says which types of base register should be accepted and
3595    OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
3596    is the type of shifter that is allowed for immediate offsets,
3597    or SHIFTED_NONE if none.
3598 
3599    In all other respects, it is the caller's responsibility to check
3600    for addressing modes not supported by the instruction, and to set
3601    inst.reloc.type.  */
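/* As an illustration, parsing "[x1, w2, sxtw #2]" yields .base_regno = 1,
   .offset.regno = 2, .offset.is_reg = 1, a .shifter of kind SXTW with
   amount 2, and .pcrel = 0, .preind = 1, .postind = 0, .writeback = 0.  */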
3602 
3603 static bool
3604 parse_address_main (char **str, aarch64_opnd_info *operand,
3605 		    aarch64_opnd_qualifier_t *base_qualifier,
3606 		    aarch64_opnd_qualifier_t *offset_qualifier,
3607 		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
3608 		    enum parse_shift_mode imm_shift_mode)
3609 {
3610   char *p = *str;
3611   const reg_entry *reg;
3612   expressionS *exp = &inst.reloc.exp;
3613 
3614   *base_qualifier = AARCH64_OPND_QLF_NIL;
3615   *offset_qualifier = AARCH64_OPND_QLF_NIL;
3616   if (! skip_past_char (&p, '['))
3617     {
3618       /* =immediate or label.  */
3619       operand->addr.pcrel = 1;
3620       operand->addr.preind = 1;
3621 
3622       /* #:<reloc_op>:<symbol>  */
3623       skip_past_char (&p, '#');
3624       if (skip_past_char (&p, ':'))
3625 	{
3626 	  bfd_reloc_code_real_type ty;
3627 	  struct reloc_table_entry *entry;
3628 
3629 	  /* Try to parse a relocation modifier.  Anything else is
3630 	     an error.  */
3631 	  entry = find_reloc_table_entry (&p);
3632 	  if (! entry)
3633 	    {
3634 	      set_syntax_error (_("unknown relocation modifier"));
3635 	      return false;
3636 	    }
3637 
3638 	  switch (operand->type)
3639 	    {
3640 	    case AARCH64_OPND_ADDR_PCREL21:
3641 	      /* adr */
3642 	      ty = entry->adr_type;
3643 	      break;
3644 
3645 	    default:
3646 	      ty = entry->ld_literal_type;
3647 	      break;
3648 	    }
3649 
3650 	  if (ty == 0)
3651 	    {
3652 	      set_syntax_error
3653 		(_("this relocation modifier is not allowed on this "
3654 		   "instruction"));
3655 	      return false;
3656 	    }
3657 
3658 	  /* #:<reloc_op>:  */
3659 	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3660 					aarch64_force_reloc (ty) == 1))
3661 	    {
3662 	      set_syntax_error (_("invalid relocation expression"));
3663 	      return false;
3664 	    }
3665 	  /* #:<reloc_op>:<expr>  */
3666 	  /* Record the relocation type.  */
3667 	  inst.reloc.type = ty;
3668 	  inst.reloc.pc_rel = entry->pc_rel;
3669 	}
3670       else
3671 	{
3672 	  if (skip_past_char (&p, '='))
3673 	    /* =immediate; need to generate the literal in the literal pool. */
3674 	    inst.gen_lit_pool = 1;
3675 
3676 	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3677 				       NORMAL_RESOLUTION))
3678 	    {
3679 	      set_syntax_error (_("invalid address"));
3680 	      return false;
3681 	    }
3682 	}
3683 
3684       *str = p;
3685       return true;
3686     }
3687 
3688   /* [ */
3689 
3690   reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3691   if (!reg || !aarch64_check_reg_type (reg, base_type))
3692     {
3693       set_syntax_error (_(get_reg_expected_msg (base_type)));
3694       return false;
3695     }
3696   operand->addr.base_regno = reg->number;
3697 
3698   /* [Xn */
3699   if (skip_past_comma (&p))
3700     {
3701       /* [Xn, */
3702       operand->addr.preind = 1;
3703 
3704       reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3705       if (reg)
3706 	{
3707 	  if (!aarch64_check_reg_type (reg, offset_type))
3708 	    {
3709 	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
3710 	      return false;
3711 	    }
3712 
3713 	  /* [Xn,Rm  */
3714 	  operand->addr.offset.regno = reg->number;
3715 	  operand->addr.offset.is_reg = 1;
3716 	  /* Shifted index.  */
3717 	  if (skip_past_comma (&p))
3718 	    {
3719 	      /* [Xn,Rm,  */
3720 	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3721 		/* Use the diagnostics set in parse_shift, so don't set a new
3722 		   error message here.  */
3723 		return false;
3724 	    }
3725 	  /* We only accept:
3726 	     [base,Xm]  # For vector plus scalar SVE2 indexing.
3727 	     [base,Xm{,LSL #imm}]
3728 	     [base,Xm,SXTX {#imm}]
3729 	     [base,Wm,(S|U)XTW {#imm}]  */
3730 	  if (operand->shifter.kind == AARCH64_MOD_NONE
3731 	      || operand->shifter.kind == AARCH64_MOD_LSL
3732 	      || operand->shifter.kind == AARCH64_MOD_SXTX)
3733 	    {
3734 	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
3735 		{
3736 		  set_syntax_error (_("invalid use of 32-bit register offset"));
3737 		  return false;
3738 		}
3739 	      if (aarch64_get_qualifier_esize (*base_qualifier)
3740 		  != aarch64_get_qualifier_esize (*offset_qualifier)
3741 		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3742 		      || *base_qualifier != AARCH64_OPND_QLF_S_S
3743 		      || *offset_qualifier != AARCH64_OPND_QLF_X))
3744 		{
3745 		  set_syntax_error (_("offset has different size from base"));
3746 		  return false;
3747 		}
3748 	    }
3749 	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3750 	    {
3751 	      set_syntax_error (_("invalid use of 64-bit register offset"));
3752 	      return false;
3753 	    }
3754 	}
3755       else
3756 	{
3757 	  /* [Xn,#:<reloc_op>:<symbol>  */
3758 	  skip_past_char (&p, '#');
3759 	  if (skip_past_char (&p, ':'))
3760 	    {
3761 	      struct reloc_table_entry *entry;
3762 
3763 	      /* Try to parse a relocation modifier.  Anything else is
3764 		 an error.  */
3765 	      if (!(entry = find_reloc_table_entry (&p)))
3766 		{
3767 		  set_syntax_error (_("unknown relocation modifier"));
3768 		  return false;
3769 		}
3770 
3771 	      if (entry->ldst_type == 0)
3772 		{
3773 		  set_syntax_error
3774 		    (_("this relocation modifier is not allowed on this "
3775 		       "instruction"));
3776 		  return false;
3777 		}
3778 
3779 	      /* [Xn,#:<reloc_op>:  */
3780 	      /* We now have the group relocation table entry corresponding to
3781 	         the name in the assembler source.  Next, we parse the
3782 	         expression.  */
3783 	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3784 					    aarch64_force_reloc (entry->ldst_type) == 1))
3785 		{
3786 		  set_syntax_error (_("invalid relocation expression"));
3787 		  return false;
3788 		}
3789 
3790 	      /* [Xn,#:<reloc_op>:<expr>  */
3791 	      /* Record the load/store relocation type.  */
3792 	      inst.reloc.type = entry->ldst_type;
3793 	      inst.reloc.pc_rel = entry->pc_rel;
3794 	    }
3795 	  else
3796 	    {
3797 	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3798 					    NORMAL_RESOLUTION))
3799 		{
3800 		  set_syntax_error (_("invalid expression in the address"));
3801 		  return false;
3802 		}
3803 	      /* [Xn,<expr>  */
3804 	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3805 		/* [Xn,<expr>,<shifter>  */
3806 		if (! parse_shift (&p, operand, imm_shift_mode))
3807 		  return false;
3808 	    }
3809 	}
3810     }
3811 
3812   if (! skip_past_char (&p, ']'))
3813     {
3814       set_syntax_error (_("']' expected"));
3815       return false;
3816     }
3817 
3818   if (skip_past_char (&p, '!'))
3819     {
3820       if (operand->addr.preind && operand->addr.offset.is_reg)
3821 	{
3822 	  set_syntax_error (_("register offset not allowed in pre-indexed "
3823 			      "addressing mode"));
3824 	  return false;
3825 	}
3826       /* [Xn]! */
3827       operand->addr.writeback = 1;
3828     }
3829   else if (skip_past_comma (&p))
3830     {
3831       /* [Xn], */
3832       operand->addr.postind = 1;
3833       operand->addr.writeback = 1;
3834 
3835       if (operand->addr.preind)
3836 	{
3837 	  set_syntax_error (_("cannot combine pre- and post-indexing"));
3838 	  return false;
3839 	}
3840 
3841       reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3842       if (reg)
3843 	{
3844 	  /* [Xn],Xm */
3845 	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3846 	    {
3847 	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3848 	      return false;
3849 	    }
3850 
3851 	  operand->addr.offset.regno = reg->number;
3852 	  operand->addr.offset.is_reg = 1;
3853 	}
3854       else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3855 					 NORMAL_RESOLUTION))
3856 	{
3857 	  /* [Xn],#expr */
3858 	  set_syntax_error (_("invalid expression in the address"));
3859 	  return false;
3860 	}
3861     }
3862 
3863   /* If at this point neither .preind nor .postind is set, we have a
3864      bare [Rn]{!}.  Accept [Rn]! only as a shorthand for [Rn,#0]! (for ldraa
3865      and ldrab); otherwise accept [Rn] as a shorthand for [Rn,#0].
3866      For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3867      [Zn.<T>, xzr].  */
3868   if (operand->addr.preind == 0 && operand->addr.postind == 0)
3869     {
3870       if (operand->addr.writeback)
3871 	{
3872 	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3873             {
3874               /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
3875               operand->addr.offset.is_reg = 0;
3876               operand->addr.offset.imm = 0;
3877               operand->addr.preind = 1;
3878             }
3879           else
3880            {
3881 	     /* Reject [Rn]!   */
3882 	     set_syntax_error (_("missing offset in the pre-indexed address"));
3883 	     return false;
3884 	   }
3885 	}
3886        else
3887 	{
3888           operand->addr.preind = 1;
3889           if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3890 	   {
3891 	     operand->addr.offset.is_reg = 1;
3892 	     operand->addr.offset.regno = REG_ZR;
3893 	     *offset_qualifier = AARCH64_OPND_QLF_X;
3894  	   }
3895           else
3896 	   {
3897 	     inst.reloc.exp.X_op = O_constant;
3898 	     inst.reloc.exp.X_add_number = 0;
3899 	   }
3900 	}
3901     }
3902 
3903   *str = p;
3904   return true;
3905 }
3906 
3907 /* Parse a base AArch64 address (as opposed to an SVE one).  Return TRUE
3908    on success.  */
3909 static bool
3910 parse_address (char **str, aarch64_opnd_info *operand)
3911 {
3912   aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3913   return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3914 			     REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3915 }
3916 
3917 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3918    The arguments have the same meaning as for parse_address_main.
3919    Return TRUE on success.  */
3920 static bool
3921 parse_sve_address (char **str, aarch64_opnd_info *operand,
3922 		   aarch64_opnd_qualifier_t *base_qualifier,
3923 		   aarch64_opnd_qualifier_t *offset_qualifier)
3924 {
3925   return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3926 			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3927 			     SHIFTED_MUL_VL);
3928 }
3929 
3930 /* Parse a register X0-X30.  The register must be 64-bit and register 31
3931    is unallocated.  */
3932 static bool
3933 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3934 {
3935   const reg_entry *reg = parse_reg (str);
3936   if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3937     {
3938       set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3939       return false;
3940     }
3941   operand->reg.regno = reg->number;
3942   operand->qualifier = AARCH64_OPND_QLF_X;
3943   return true;
3944 }
3945 
3946 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3947    Return TRUE on success; otherwise return FALSE.  */
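/* For instance, the operand in "movz x0, #:abs_g0_nc:foo" carries the
   relocation modifier "abs_g0_nc" (looked up via its movw_type entry),
   while a plain "#0x1234" operand requests an internal fixup instead.  */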
3948 static bool
3949 parse_half (char **str, int *internal_fixup_p)
3950 {
3951   char *p = *str;
3952 
3953   skip_past_char (&p, '#');
3954 
3955   gas_assert (internal_fixup_p);
3956   *internal_fixup_p = 0;
3957 
3958   if (*p == ':')
3959     {
3960       struct reloc_table_entry *entry;
3961 
3962       /* Try to parse a relocation.  Anything else is an error.  */
3963       ++p;
3964 
3965       if (!(entry = find_reloc_table_entry (&p)))
3966 	{
3967 	  set_syntax_error (_("unknown relocation modifier"));
3968 	  return false;
3969 	}
3970 
3971       if (entry->movw_type == 0)
3972 	{
3973 	  set_syntax_error
3974 	    (_("this relocation modifier is not allowed on this instruction"));
3975 	  return false;
3976 	}
3977 
3978       inst.reloc.type = entry->movw_type;
3979     }
3980   else
3981     *internal_fixup_p = 1;
3982 
3983   if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3984 				aarch64_force_reloc (inst.reloc.type) == 1))
3985     return false;
3986 
3987   *str = p;
3988   return true;
3989 }
3990 
3991 /* Parse an operand for an ADRP instruction:
3992      ADRP <Xd>, <label>
3993    Return TRUE on success; otherwise return FALSE.  */
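/* For example, "adrp x0, :got:var" uses the "got" entry's adrp_type
   relocation, while "adrp x0, var" falls back to
   BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */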
3994 
3995 static bool
3996 parse_adrp (char **str)
3997 {
3998   char *p;
3999 
4000   p = *str;
4001   if (*p == ':')
4002     {
4003       struct reloc_table_entry *entry;
4004 
4005       /* Try to parse a relocation.  Anything else is an error.  */
4006       ++p;
4007       if (!(entry = find_reloc_table_entry (&p)))
4008 	{
4009 	  set_syntax_error (_("unknown relocation modifier"));
4010 	  return false;
4011 	}
4012 
4013       if (entry->adrp_type == 0)
4014 	{
4015 	  set_syntax_error
4016 	    (_("this relocation modifier is not allowed on this instruction"));
4017 	  return false;
4018 	}
4019 
4020       inst.reloc.type = entry->adrp_type;
4021     }
4022   else
4023     inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4024 
4025   inst.reloc.pc_rel = 1;
4026   if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4027 				aarch64_force_reloc (inst.reloc.type) == 1))
4028     return false;
4029   *str = p;
4030   return true;
4031 }
4032 
4033 /* Miscellaneous. */
4034 
4035 /* Parse a symbolic operand such as "pow2" at *STR.  ARRAY is an array
4036    of SIZE tokens in which index I gives the token for field value I,
4037    or is null if field value I is invalid.  REG_TYPE says which register
4038    names should be treated as registers rather than as symbolic immediates.
4039 
4040    Return true on success, moving *STR past the operand and storing the
4041    field value in *VAL.  */
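/* For example, with an ARRAY of symbolic names such as the SVE pattern
   names { "pow2", "vl1", "vl2", ... }, the token "vl2" yields *VAL = 2;
   a numeric immediate below SIZE, e.g. "#2", is accepted as well and
   stored directly.  */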
4042 
4043 static int
4044 parse_enum_string (char **str, int64_t *val, const char *const *array,
4045 		   size_t size, aarch64_reg_type reg_type)
4046 {
4047   expressionS exp;
4048   char *p, *q;
4049   size_t i;
4050 
4051   /* Match C-like tokens.  */
4052   p = q = *str;
4053   while (ISALNUM (*q))
4054     q++;
4055 
4056   for (i = 0; i < size; ++i)
4057     if (array[i]
4058 	&& strncasecmp (array[i], p, q - p) == 0
4059 	&& array[i][q - p] == 0)
4060       {
4061 	*val = i;
4062 	*str = q;
4063 	return true;
4064       }
4065 
4066   if (!parse_immediate_expression (&p, &exp, reg_type))
4067     return false;
4068 
4069   if (exp.X_op == O_constant
4070       && (uint64_t) exp.X_add_number < size)
4071     {
4072       *val = exp.X_add_number;
4073       *str = p;
4074       return true;
4075     }
4076 
4077   /* Use the default error for this operand.  */
4078   return false;
4079 }
4080 
4081 /* Parse an option for a preload instruction.  Returns the encoding for the
4082    option, or PARSE_FAIL.  */
4083 
4084 static int
4085 parse_pldop (char **str)
4086 {
4087   char *p, *q;
4088   const struct aarch64_name_value_pair *o;
4089 
4090   p = q = *str;
4091   while (ISALNUM (*q))
4092     q++;
4093 
4094   o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4095   if (!o)
4096     return PARSE_FAIL;
4097 
4098   *str = q;
4099   return o->value;
4100 }
4101 
4102 /* Parse an option for a barrier instruction.  Returns the encoding for the
4103    option, or PARSE_FAIL.  */
4104 
4105 static int
4106 parse_barrier (char **str)
4107 {
4108   char *p, *q;
4109   const struct aarch64_name_value_pair *o;
4110 
4111   p = q = *str;
4112   while (ISALPHA (*q))
4113     q++;
4114 
4115   o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4116   if (!o)
4117     return PARSE_FAIL;
4118 
4119   *str = q;
4120   return o->value;
4121 }
4122 
4123 /* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
4124    and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4125 
4126 static int
4127 parse_barrier_psb (char **str,
4128 		   const struct aarch64_name_value_pair ** hint_opt)
4129 {
4130   char *p, *q;
4131   const struct aarch64_name_value_pair *o;
4132 
4133   p = q = *str;
4134   while (ISALPHA (*q))
4135     q++;
4136 
4137   o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4138   if (!o)
4139     {
4140       set_fatal_syntax_error
4141 	( _("unknown or missing option to PSB/TSB"));
4142       return PARSE_FAIL;
4143     }
4144 
4145   if (o->value != 0x11)
4146     {
4147       /* PSB only accepts option name 'CSYNC'.  */
4148       set_syntax_error
4149 	(_("the specified option is not accepted for PSB/TSB"));
4150       return PARSE_FAIL;
4151     }
4152 
4153   *str = q;
4154   *hint_opt = o;
4155   return 0;
4156 }
4157 
4158 /* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record
4159    and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4160 
4161 static int
4162 parse_bti_operand (char **str,
4163 		   const struct aarch64_name_value_pair ** hint_opt)
4164 {
4165   char *p, *q;
4166   const struct aarch64_name_value_pair *o;
4167 
4168   p = q = *str;
4169   while (ISALPHA (*q))
4170     q++;
4171 
4172   o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4173   if (!o)
4174     {
4175       set_fatal_syntax_error
4176 	( _("unknown option to BTI"));
4177       return PARSE_FAIL;
4178     }
4179 
4180   switch (o->value)
4181     {
4182     /* Valid BTI operands.  */
4183     case HINT_OPD_C:
4184     case HINT_OPD_J:
4185     case HINT_OPD_JC:
4186       break;
4187 
4188     default:
4189       set_syntax_error
4190 	(_("unknown option to BTI"));
4191       return PARSE_FAIL;
4192     }
4193 
4194   *str = q;
4195   *hint_opt = o;
4196   return 0;
4197 }
4198 
4199 /* Parse STR for a register of REG_TYPE followed by '.' and a QUALIFIER.
4200    Return the REG_ENTRY struct and set QUALIFIER to one of [bhsdq], or
4201    return NULL on failure. Format:
4202 
4203      REG_TYPE.QUALIFIER
4204 
4205    Side effect: on success, STR is updated to the current parse position.
4206 */
4207 
4208 static const reg_entry *
4209 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4210                      aarch64_opnd_qualifier_t *qualifier)
4211 {
4212   char *q;
4213 
4214   reg_entry *reg = parse_reg (str);
4215   if (reg != NULL && reg->type == reg_type)
4216     {
4217       if (!skip_past_char (str, '.'))
4218         {
4219           set_syntax_error (_("missing ZA tile element size separator"));
4220           return NULL;
4221         }
4222 
4223       q = *str;
4224       switch (TOLOWER (*q))
4225         {
4226         case 'b':
4227           *qualifier = AARCH64_OPND_QLF_S_B;
4228           break;
4229         case 'h':
4230           *qualifier = AARCH64_OPND_QLF_S_H;
4231           break;
4232         case 's':
4233           *qualifier = AARCH64_OPND_QLF_S_S;
4234           break;
4235         case 'd':
4236           *qualifier = AARCH64_OPND_QLF_S_D;
4237           break;
4238         case 'q':
4239           *qualifier = AARCH64_OPND_QLF_S_Q;
4240           break;
4241         default:
4242           return NULL;
4243         }
4244       q++;
4245 
4246       *str = q;
4247       return reg;
4248     }
4249 
4250   return NULL;
4251 }
4252 
4253 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4254    The tile qualifier is returned through QUALIFIER on success.
4255 
4256    Tiles are in example format: za[0-9]\.[bhsd]
4257 
4258    Function returns <ZAda> register number or PARSE_FAIL.
4259 */
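/* For example, "za3.s" parses as tile number 3 with a word (.s) qualifier,
   which is accepted because the 32-bit tiles range over za0-za3.  */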
4260 static int
4261 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4262 {
4263   int regno;
4264   const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4265 
4266   if (reg == NULL)
4267     return PARSE_FAIL;
4268   regno = reg->number;
4269 
4270   switch (*qualifier)
4271     {
4272     case AARCH64_OPND_QLF_S_B:
4273       if (regno != 0x00)
4274       {
4275         set_syntax_error (_("invalid ZA tile register number, expected za0"));
4276         return PARSE_FAIL;
4277       }
4278       break;
4279     case AARCH64_OPND_QLF_S_H:
4280       if (regno > 0x01)
4281       {
4282         set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4283         return PARSE_FAIL;
4284       }
4285       break;
4286     case AARCH64_OPND_QLF_S_S:
4287       if (regno > 0x03)
4288       {
4289         /* For the 32-bit variant the ZA tile name must be ZA0-ZA3.  */
4290         set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4291         return PARSE_FAIL;
4292       }
4293       break;
4294     case AARCH64_OPND_QLF_S_D:
4295       if (regno > 0x07)
4296       {
4297         /* For the 64-bit variant the ZA tile name must be ZA0-ZA7.  */
4298         set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4299         return PARSE_FAIL;
4300       }
4301       break;
4302     default:
4303       set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4304       return PARSE_FAIL;
4305     }
4306 
4307   return regno;
4308 }
4309 
4310 /* Parse STR for an unsigned immediate (1-2 digits) in the format:
4311 
4312      #<imm>
4313      <imm>
4314 
4315   Return TRUE if an immediate was found, FALSE otherwise.
4316 */
4317 static bool
4318 parse_sme_immediate (char **str, int64_t *imm)
4319 {
4320   int64_t val;
4321   if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4322     return false;
4323 
4324   *imm = val;
4325   return true;
4326 }
4327 
4328 /* Parse index with vector select register and immediate:
4329 
4330    [<Wv>, <imm>]
4331    [<Wv>, #<imm>]
4332    where <Wv> is in the W12-W15 range and the '#' prefix on the immediate
4333    is optional.
4334 
4335    The index offset immediate is mandatory; a missing immediate is
4336    reported as a syntax error.
4337 
4338    On success, TRUE is returned and VECTOR_SELECT_REGISTER and IMM are set.
4339 */
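/* For example, "[w13, #2]" sets *VECTOR_SELECT_REGISTER to 13 and *IMM to 2;
   the same text without the '#' is accepted too.  */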
4340 static bool
4341 parse_sme_za_hv_tiles_operand_index (char **str,
4342                                      int *vector_select_register,
4343                                      int64_t *imm)
4344 {
4345   const reg_entry *reg;
4346 
4347   if (!skip_past_char (str, '['))
4348     {
4349       set_syntax_error (_("expected '['"));
4350       return false;
4351     }
4352 
4353   /* Vector select register W12-W15 encoded in the 2-bit Rv field.  */
4354   reg = parse_reg (str);
4355   if (reg == NULL || reg->type != REG_TYPE_R_32
4356       || reg->number < 12 || reg->number > 15)
4357     {
4358       set_syntax_error (_("expected vector select register W12-W15"));
4359       return false;
4360     }
4361   *vector_select_register = reg->number;
4362 
4363   if (!skip_past_char (str, ','))    /* Comma before the index offset immediate.  */
4364     {
4365       set_syntax_error (_("expected ','"));
4366       return false;
4367     }
4368 
4369   if (!parse_sme_immediate (str, imm))
4370     {
4371       set_syntax_error (_("index offset immediate expected"));
4372       return false;
4373     }
4374 
4375   if (!skip_past_char (str, ']'))
4376     {
4377       set_syntax_error (_("expected ']'"));
4378       return false;
4379     }
4380 
4381   return true;
4382 }
4383 
4384 /* Parse SME ZA horizontal or vertical vector access to tiles.
4385    The <HV> tile vector orientation, horizontal (0) or vertical (1), is
4386    extracted from STR into SLICE_INDICATOR.  VECTOR_SELECT_REGISTER receives
4387    the <Wv> select register and IMM the corresponding index immediate.
4388    In addition the element QUALIFIER is extracted.
4389 
4390    Field format examples:
4391 
4392    ZA0<HV>.B[<Wv>, #<imm>]
4393    <ZAn><HV>.H[<Wv>, #<imm>]
4394    <ZAn><HV>.S[<Wv>, #<imm>]
4395    <ZAn><HV>.D[<Wv>, #<imm>]
4396    <ZAn><HV>.Q[<Wv>, #<imm>]
4397 
4398    Function returns <ZAda> register number or PARSE_FAIL.
4399 */
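/* For example, "za0h.b[w12, #10]" selects the single byte-granule tile
   (regno_limit 0) horizontally, with slice index 10 inside the 0-15 range
   allowed for the .b qualifier.  */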
4400 static int
4401 parse_sme_za_hv_tiles_operand (char **str,
4402                                enum sme_hv_slice *slice_indicator,
4403                                int *vector_select_register,
4404                                int *imm,
4405                                aarch64_opnd_qualifier_t *qualifier)
4406 {
4407   char *qh, *qv;
4408   int regno;
4409   int regno_limit;
4410   int64_t imm_limit;
4411   int64_t imm_value;
4412   const reg_entry *reg;
4413 
4414   qh = qv = *str;
4415   if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
4416     {
4417       *slice_indicator = HV_horizontal;
4418       *str = qh;
4419     }
4420   else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
4421     {
4422       *slice_indicator = HV_vertical;
4423       *str = qv;
4424     }
4425   else
4426     return PARSE_FAIL;
4427   regno = reg->number;
4428 
4429   switch (*qualifier)
4430     {
4431     case AARCH64_OPND_QLF_S_B:
4432       regno_limit = 0;
4433       imm_limit = 15;
4434       break;
4435     case AARCH64_OPND_QLF_S_H:
4436       regno_limit = 1;
4437       imm_limit = 7;
4438       break;
4439     case AARCH64_OPND_QLF_S_S:
4440       regno_limit = 3;
4441       imm_limit = 3;
4442       break;
4443     case AARCH64_OPND_QLF_S_D:
4444       regno_limit = 7;
4445       imm_limit = 1;
4446       break;
4447     case AARCH64_OPND_QLF_S_Q:
4448       regno_limit = 15;
4449       imm_limit = 0;
4450       break;
4451     default:
4452       set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
4453       return PARSE_FAIL;
4454     }
4455 
4456   /* Check if destination register ZA tile vector is in range for given
4457      instruction variant.  */
4458   if (regno < 0 || regno > regno_limit)
4459     {
4460       set_syntax_error (_("ZA tile vector out of range"));
4461       return PARSE_FAIL;
4462     }
4463 
4464   if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
4465                                             &imm_value))
4466     return PARSE_FAIL;
4467 
4468   /* Check if optional index offset is in the range for instruction
4469      variant.  */
4470   if (imm_value < 0 || imm_value > imm_limit)
4471     {
4472       set_syntax_error (_("index offset out of range"));
4473       return PARSE_FAIL;
4474     }
4475 
4476   *imm = imm_value;
4477 
4478   return regno;
4479 }
4480 
4481 
4482 static int
4483 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4484                                            enum sme_hv_slice *slice_indicator,
4485                                            int *vector_select_register,
4486                                            int *imm,
4487                                            aarch64_opnd_qualifier_t *qualifier)
4488 {
4489   int regno;
4490 
4491   if (!skip_past_char (str, '{'))
4492     {
4493       set_syntax_error (_("expected '{'"));
4494       return PARSE_FAIL;
4495     }
4496 
4497   regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4498                                          vector_select_register, imm,
4499                                          qualifier);
4500 
4501   if (regno == PARSE_FAIL)
4502     return PARSE_FAIL;
4503 
4504   if (!skip_past_char (str, '}'))
4505     {
4506       set_syntax_error (_("expected '}'"));
4507       return PARSE_FAIL;
4508     }
4509 
4510   return regno;
4511 }
4512 
4513 /* Parse list of up to eight 64-bit element tile names separated by commas in
4514    SME's ZERO instruction:
4515 
4516      ZERO { <mask> }
4517 
4518    Function returns <mask>:
4519 
4520      an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4521 */
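/* For example, "zero {za1.s}" produces the mask 0x22 (ZA1.S overlaps ZA1.D
   and ZA5.D), while "zero {za}" and "zero {za0.b}" both produce 0xff.  */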
4522 static int
4523 parse_sme_zero_mask(char **str)
4524 {
4525   char *q;
4526   int mask;
4527   aarch64_opnd_qualifier_t qualifier;
4528 
4529   mask = 0x00;
4530   q = *str;
4531   do
4532     {
4533       const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
4534       if (reg)
4535         {
4536           int regno = reg->number;
4537           if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
4538             {
4539               /* { ZA0.B } is assembled as all-ones immediate.  */
4540               mask = 0xff;
4541             }
4542           else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
4543             mask |= 0x55 << regno;
4544           else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
4545             mask |= 0x11 << regno;
4546           else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
4547             mask |= 0x01 << regno;
4548           else
4549             {
4550               set_syntax_error (_("wrong ZA tile element format"));
4551               return PARSE_FAIL;
4552             }
4553           continue;
4554         }
4555       else if (strncasecmp (q, "za", 2) == 0
4556                && !ISALNUM (q[2]))
4557         {
4558           /* { ZA } is assembled as all-ones immediate.  */
4559           mask = 0xff;
4560           q += 2;
4561           continue;
4562         }
4563       else
4564         {
4565           set_syntax_error (_("wrong ZA tile element format"));
4566           return PARSE_FAIL;
4567         }
4568     }
4569   while (skip_past_char (&q, ','));
4570 
4571   *str = q;
4572   return mask;
4573 }
4574 
4575 /* Parse the curly-brace-wrapped <mask> operand of SME's ZERO instruction:
4576 
4577    ZERO { <mask> }
4578 
4579    Function returns the value of the <mask> bit-field.
4580 */
4581 static int
4582 parse_sme_list_of_64bit_tiles (char **str)
4583 {
4584   int regno;
4585 
4586   if (!skip_past_char (str, '{'))
4587     {
4588       set_syntax_error (_("expected '{'"));
4589       return PARSE_FAIL;
4590     }
4591 
4592   /* Empty <mask> list is an all-zeros immediate.  */
4593   if (!skip_past_char (str, '}'))
4594     {
4595       regno = parse_sme_zero_mask (str);
4596       if (regno == PARSE_FAIL)
4597          return PARSE_FAIL;
4598 
4599       if (!skip_past_char (str, '}'))
4600         {
4601           set_syntax_error (_("expected '}'"));
4602           return PARSE_FAIL;
4603         }
4604     }
4605   else
4606     regno = 0x00;
4607 
4608   return regno;
4609 }
4610 
4611 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4612    Operand format:
4613 
4614    ZA[<Wv>, <imm>]
4615    ZA[<Wv>, #<imm>]
4616 
4617    Function returns <Wv> or PARSE_FAIL.
4618 */
4619 static int
4620 parse_sme_za_array (char **str, int *imm)
4621 {
4622   char *p, *q;
4623   int regno;
4624   int64_t imm_value;
4625 
4626   p = q = *str;
4627   while (ISALPHA (*q))
4628     q++;
4629 
4630   if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4631     {
4632       set_syntax_error (_("expected ZA array"));
4633       return PARSE_FAIL;
4634     }
4635 
4636   if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4637     return PARSE_FAIL;
4638 
4639   if (imm_value < 0 || imm_value > 15)
4640     {
4641       set_syntax_error (_("offset out of range"));
4642       return PARSE_FAIL;
4643     }
4644 
4645   *imm = imm_value;
4646   *str = q;
4647   return regno;
4648 }
4649 
4650 /* Parse streaming mode operand for SMSTART and SMSTOP.
4651 
4652    {SM | ZA}
4653 
4654    Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4655 */
4656 static int
4657 parse_sme_sm_za (char **str)
4658 {
4659   char *p, *q;
4660 
4661   p = q = *str;
4662   while (ISALPHA (*q))
4663     q++;
4664 
4665   if ((q - p != 2)
4666       || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4667     {
4668       set_syntax_error (_("expected SM or ZA operand"));
4669       return PARSE_FAIL;
4670     }
4671 
4672   *str = q;
4673   return TOLOWER (p[0]);
4674 }
4675 
4676 /* Parse the name of the source scalable predicate register, the index base
4677    register W12-W15 and the element index. Function performs element index
4678    limit checks as well as qualifier type checks.
4679 
4680    <Pn>.<T>[<Wv>, <imm>]
4681    <Pn>.<T>[<Wv>, #<imm>]
4682 
4683    On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4684    <imm> to IMM.
4685    Function returns <Pn>, or PARSE_FAIL.
4686 */
4687 static int
4688 parse_sme_pred_reg_with_index(char **str,
4689                               int *index_base_reg,
4690                               int *imm,
4691                               aarch64_opnd_qualifier_t *qualifier)
4692 {
4693   int regno;
4694   int64_t imm_limit;
4695   int64_t imm_value;
4696   const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4697 
4698   if (reg == NULL)
4699     return PARSE_FAIL;
4700   regno = reg->number;
4701 
4702   switch (*qualifier)
4703     {
4704     case AARCH64_OPND_QLF_S_B:
4705       imm_limit = 15;
4706       break;
4707     case AARCH64_OPND_QLF_S_H:
4708       imm_limit = 7;
4709       break;
4710     case AARCH64_OPND_QLF_S_S:
4711       imm_limit = 3;
4712       break;
4713     case AARCH64_OPND_QLF_S_D:
4714       imm_limit = 1;
4715       break;
4716     default:
4717       set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4718       return PARSE_FAIL;
4719     }
4720 
4721   if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4722     return PARSE_FAIL;
4723 
4724   if (imm_value < 0 || imm_value > imm_limit)
4725     {
4726       set_syntax_error (_("element index out of range for given variant"));
4727       return PARSE_FAIL;
4728     }
4729 
4730   *imm = imm_value;
4731 
4732   return regno;
4733 }
4734 
4735 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4736    Returns the encoding for the option, or PARSE_FAIL.
4737 
4738    If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4739    implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4740 
4741    If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4742    field, otherwise as a system register.
4743 */
4744 
4745 static int
4746 parse_sys_reg (char **str, htab_t sys_regs,
4747 	       int imple_defined_p, int pstatefield_p,
4748 	       uint32_t* flags)
4749 {
4750   char *p, *q;
4751   char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4752   const aarch64_sys_reg *o;
4753   int value;
4754 
4755   p = buf;
4756   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4757     if (p < buf + (sizeof (buf) - 1))
4758       *p++ = TOLOWER (*q);
4759   *p = '\0';
4760 
4761   /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4762      valid system register.  This is enforced by construction of the hash
4763      table.  */
4764   if (p - buf != q - *str)
4765     return PARSE_FAIL;
4766 
4767   o = str_hash_find (sys_regs, buf);
4768   if (!o)
4769     {
4770       if (!imple_defined_p)
4771 	return PARSE_FAIL;
4772       else
4773 	{
4774 	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
4775 	  unsigned int op0, op1, cn, cm, op2;
4776 
4777 	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4778 	      != 5)
4779 	    return PARSE_FAIL;
4780 	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4781 	    return PARSE_FAIL;
4782 	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
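	  /* For example, "s2_1_c3_c4_5" encodes as (2 << 14) | (1 << 11)
	     | (3 << 7) | (4 << 3) | 5 == 0x89a5.  */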
4783 	  if (flags)
4784 	    *flags = 0;
4785 	}
4786     }
4787   else
4788     {
4789       if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4790 	as_bad (_("selected processor does not support PSTATE field "
4791 		  "name '%s'"), buf);
4792       if (!pstatefield_p
4793 	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4794 					       o->value, o->flags, o->features))
4795 	as_bad (_("selected processor does not support system register "
4796 		  "name '%s'"), buf);
4797       if (aarch64_sys_reg_deprecated_p (o->flags))
4798 	as_warn (_("system register name '%s' is deprecated and may be "
4799 		   "removed in a future release"), buf);
4800       value = o->value;
4801       if (flags)
4802 	*flags = o->flags;
4803     }
4804 
4805   *str = q;
4806   return value;
4807 }
4808 
4809 /* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
4810    for the option, or NULL.  */
4811 
4812 static const aarch64_sys_ins_reg *
4813 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4814 {
4815   char *p, *q;
4816   char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4817   const aarch64_sys_ins_reg *o;
4818 
4819   p = buf;
4820   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4821     if (p < buf + (sizeof (buf) - 1))
4822       *p++ = TOLOWER (*q);
4823   *p = '\0';
4824 
4825   /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4826      valid system register.  This is enforced by construction of the hash
4827      table.  */
4828   if (p - buf != q - *str)
4829     return NULL;
4830 
4831   o = str_hash_find (sys_ins_regs, buf);
4832   if (!o)
4833     return NULL;
4834 
4835   if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4836 					o->name, o->value, o->flags, 0))
4837     as_bad (_("selected processor does not support system register "
4838 	      "name '%s'"), buf);
4839   if (aarch64_sys_reg_deprecated_p (o->flags))
4840     as_warn (_("system register name '%s' is deprecated and may be "
4841           "removed in a future release"), buf);
4842 
4843   *str = q;
4844   return o;
4845 }
4846 
4847 #define po_char_or_fail(chr) do {				\
4848     if (! skip_past_char (&str, chr))				\
4849       goto failure;						\
4850 } while (0)
4851 
4852 #define po_reg_or_fail(regtype) do {				\
4853     val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
4854     if (val == PARSE_FAIL)					\
4855       {								\
4856 	set_default_error ();					\
4857 	goto failure;						\
4858       }								\
4859   } while (0)
4860 
4861 #define po_int_reg_or_fail(reg_type) do {			\
4862     reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
4863     if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
4864       {								\
4865 	set_default_error ();					\
4866 	goto failure;						\
4867       }								\
4868     info->reg.regno = reg->number;				\
4869     info->qualifier = qualifier;				\
4870   } while (0)
4871 
4872 #define po_imm_nc_or_fail() do {				\
4873     if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
4874       goto failure;						\
4875   } while (0)
4876 
4877 #define po_imm_or_fail(min, max) do {				\
4878     if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
4879       goto failure;						\
4880     if (val < min || val > max)					\
4881       {								\
4882 	set_fatal_syntax_error (_("immediate value out of range "\
4883 #min " to "#max));						\
4884 	goto failure;						\
4885       }								\
4886   } while (0)
4887 
4888 #define po_enum_or_fail(array) do {				\
4889     if (!parse_enum_string (&str, &val, array,			\
4890 			    ARRAY_SIZE (array), imm_reg_type))	\
4891       goto failure;						\
4892   } while (0)
4893 
4894 #define po_misc_or_fail(expr) do {				\
4895     if (!expr)							\
4896       goto failure;						\
4897   } while (0)
4898 
4899 /* encode the 12-bit imm field of Add/sub immediate */
4900 static inline uint32_t
4901 encode_addsub_imm (uint32_t imm)
4902 {
4903   return imm << 10;
4904 }
4905 
4906 /* encode the shift amount field of Add/sub immediate */
4907 static inline uint32_t
4908 encode_addsub_imm_shift_amount (uint32_t cnt)
4909 {
4910   return cnt << 22;
4911 }
4912 
4913 
4914 /* encode the imm field of Adr instruction */
4915 static inline uint32_t
4916 encode_adr_imm (uint32_t imm)
4917 {
4918   return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
4919 	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
4920 }
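/* For example, encode_adr_imm (0x5) places the low two bits (0b01) at
   [30:29] and bit 2 at bit 5, giving 0x20000020.  */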
4921 
4922 /* encode the immediate field of Move wide immediate */
4923 static inline uint32_t
4924 encode_movw_imm (uint32_t imm)
4925 {
4926   return imm << 5;
4927 }
4928 
4929 /* encode the 26-bit offset of unconditional branch */
4930 static inline uint32_t
4931 encode_branch_ofs_26 (uint32_t ofs)
4932 {
4933   return ofs & ((1 << 26) - 1);
4934 }
4935 
4936 /* encode the 19-bit offset of conditional branch and compare & branch */
4937 static inline uint32_t
4938 encode_cond_branch_ofs_19 (uint32_t ofs)
4939 {
4940   return (ofs & ((1 << 19) - 1)) << 5;
4941 }
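/* For example, an offset value of 4 is masked to 19 bits and placed at
   [23:5], giving 0x80 in the imm19 field.  */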
4942 
4943 /* encode the 19-bit offset of ld literal */
4944 static inline uint32_t
4945 encode_ld_lit_ofs_19 (uint32_t ofs)
4946 {
4947   return (ofs & ((1 << 19) - 1)) << 5;
4948 }
4949 
4950 /* Encode the 14-bit offset of test & branch.  */
4951 static inline uint32_t
4952 encode_tst_branch_ofs_14 (uint32_t ofs)
4953 {
4954   return (ofs & ((1 << 14) - 1)) << 5;
4955 }
4956 
4957 /* Encode the 16-bit imm field of svc/hvc/smc.  */
4958 static inline uint32_t
4959 encode_svc_imm (uint32_t imm)
4960 {
4961   return imm << 5;
4962 }
4963 
4964 /* Reencode add(s) to sub(s), or sub(s) to add(s).  */
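/* Bit 30 is the "op" bit that selects between ADD(S) and SUB(S) in the A64
   add/subtract encodings, so flipping it switches the two forms.  */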
4965 static inline uint32_t
4966 reencode_addsub_switch_add_sub (uint32_t opcode)
4967 {
4968   return opcode ^ (1 << 30);
4969 }
4970 
4971 static inline uint32_t
4972 reencode_movzn_to_movz (uint32_t opcode)
4973 {
4974   return opcode | (1 << 30);
4975 }
4976 
4977 static inline uint32_t
4978 reencode_movzn_to_movn (uint32_t opcode)
4979 {
4980   return opcode & ~(1 << 30);
4981 }
4982 
4983 /* Overall per-instruction processing.	*/
4984 
4985 /* We need to be able to fix up arbitrary expressions in some statements.
4986    This is so that we can handle symbols that are an arbitrary distance from
4987    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4988    which returns part of an address in a form which will be valid for
4989    a data instruction.	We do this by pushing the expression into a symbol
4990    in the expr_section, and creating a fix for that.  */
4991 
4992 static fixS *
4993 fix_new_aarch64 (fragS * frag,
4994 		 int where,
4995 		 short int size,
4996 		 expressionS * exp,
4997 		 int pc_rel,
4998 		 int reloc)
4999 {
5000   fixS *new_fix;
5001 
5002   switch (exp->X_op)
5003     {
5004     case O_constant:
5005     case O_symbol:
5006     case O_add:
5007     case O_subtract:
5008       new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5009       break;
5010 
5011     default:
5012       new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5013 			 pc_rel, reloc);
5014       break;
5015     }
5016   return new_fix;
5017 }
5018 
5019 /* Diagnostics on operands errors.  */
5020 
5021 /* By default, output a verbose error message.
5022    Disable the verbose error message with -mno-verbose-error.  */
5023 static int verbose_error_p = 1;
5024 
5025 #ifdef DEBUG_AARCH64
5026 /* N.B. this is only for the purpose of debugging.  */
5027 const char* operand_mismatch_kind_names[] =
5028 {
5029   "AARCH64_OPDE_NIL",
5030   "AARCH64_OPDE_RECOVERABLE",
5031   "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
5032   "AARCH64_OPDE_EXPECTED_A_AFTER_B",
5033   "AARCH64_OPDE_SYNTAX_ERROR",
5034   "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
5035   "AARCH64_OPDE_INVALID_VARIANT",
5036   "AARCH64_OPDE_OUT_OF_RANGE",
5037   "AARCH64_OPDE_UNALIGNED",
5038   "AARCH64_OPDE_REG_LIST",
5039   "AARCH64_OPDE_OTHER_ERROR",
5040 };
5041 #endif /* DEBUG_AARCH64 */
5042 
5043 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5044 
5045    When multiple errors of different kinds are found in the same assembly
5046    line, only the error of the highest severity will be picked up for
5047    issuing the diagnostics.  */
5048 
5049 static inline bool
5050 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
5051 				 enum aarch64_operand_error_kind rhs)
5052 {
5053   gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
5054   gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
5055   gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
5056   gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
5057   gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
5058   gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
5059   gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
5060   gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
5061   gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
5062   gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
5063   gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
5064   return lhs > rhs;
5065 }
5066 
5067 /* Helper routine to get the mnemonic name from the assembly instruction
5068    line; should only be called for diagnostic purposes, as a string copy
5069    operation is involved, which may affect runtime performance if used
5070    elsewhere.  */
5071 
5072 static const char*
5073 get_mnemonic_name (const char *str)
5074 {
5075   static char mnemonic[32];
5076   char *ptr;
5077 
5078   /* Get the first 31 bytes and assume that the full name is included.  */
5079   strncpy (mnemonic, str, 31);
5080   mnemonic[31] = '\0';
5081 
5082   /* Scan up to the end of the mnemonic, which must end in white space,
5083      '.', or end of string.  */
5084   for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
5085     ;
5086 
5087   *ptr = '\0';
5088 
5089   /* Append '...' to the truncated long name.  */
5090   if (ptr - mnemonic == 31)
5091     mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
5092 
5093   return mnemonic;
5094 }
5095 
5096 static void
5097 reset_aarch64_instruction (aarch64_instruction *instruction)
5098 {
5099   memset (instruction, '\0', sizeof (aarch64_instruction));
5100   instruction->reloc.type = BFD_RELOC_UNUSED;
5101 }
5102 
5103 /* Data structures storing one user error in the assembly code related to
5104    operands.  */
5105 
5106 struct operand_error_record
5107 {
5108   const aarch64_opcode *opcode;
5109   aarch64_operand_error detail;
5110   struct operand_error_record *next;
5111 };
5112 
5113 typedef struct operand_error_record operand_error_record;
5114 
5115 struct operand_errors
5116 {
5117   operand_error_record *head;
5118   operand_error_record *tail;
5119 };
5120 
5121 typedef struct operand_errors operand_errors;
5122 
5123 /* Top-level data structure reporting user errors for the current line of
5124    the assembly code.
5125    The way md_assemble works is that all opcodes sharing the same mnemonic
5126    name are iterated to find a match to the assembly line.  In this data
5127    structure, each such opcode will have one operand_error_record
5128    allocated and inserted.  In other words, excessive errors related to
5129    a single opcode are disregarded.  */
5130 operand_errors operand_error_report;
5131 
5132 /* Free record nodes.  */
5133 static operand_error_record *free_opnd_error_record_nodes = NULL;
5134 
5135 /* Initialize the data structure that stores the operand mismatch
5136    information on assembling one line of the assembly code.  */
5137 static void
5138 init_operand_error_report (void)
5139 {
5140   if (operand_error_report.head != NULL)
5141     {
5142       gas_assert (operand_error_report.tail != NULL);
5143       operand_error_report.tail->next = free_opnd_error_record_nodes;
5144       free_opnd_error_record_nodes = operand_error_report.head;
5145       operand_error_report.head = NULL;
5146       operand_error_report.tail = NULL;
5147       return;
5148     }
5149   gas_assert (operand_error_report.tail == NULL);
5150 }
5151 
5152 /* Return TRUE if some operand error has been recorded during the
5153    parsing of the current assembly line using the opcode *OPCODE;
5154    otherwise return FALSE.  */
5155 static inline bool
5156 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5157 {
5158   operand_error_record *record = operand_error_report.head;
5159   return record && record->opcode == opcode;
5160 }
5161 
5162 /* Add the error record *NEW_RECORD to operand_error_report.  The record's
5163    OPCODE field is initialized with OPCODE.
5164    N.B. only one record for each opcode, i.e. at most one error is
5165    recorded for each instruction template.  */
5166 
5167 static void
5168 add_operand_error_record (const operand_error_record* new_record)
5169 {
5170   const aarch64_opcode *opcode = new_record->opcode;
5171   operand_error_record* record = operand_error_report.head;
5172 
5173   /* The record may have been created for this opcode.  If not, we need
5174      to prepare one.  */
5175   if (! opcode_has_operand_error_p (opcode))
5176     {
5177       /* Get one empty record.  */
5178       if (free_opnd_error_record_nodes == NULL)
5179 	{
5180 	  record = XNEW (operand_error_record);
5181 	}
5182       else
5183 	{
5184 	  record = free_opnd_error_record_nodes;
5185 	  free_opnd_error_record_nodes = record->next;
5186 	}
5187       record->opcode = opcode;
5188       /* Insert at the head.  */
5189       record->next = operand_error_report.head;
5190       operand_error_report.head = record;
5191       if (operand_error_report.tail == NULL)
5192 	operand_error_report.tail = record;
5193     }
5194   else if (record->detail.kind != AARCH64_OPDE_NIL
5195 	   && record->detail.index <= new_record->detail.index
5196 	   && operand_error_higher_severity_p (record->detail.kind,
5197 					       new_record->detail.kind))
5198     {
5199       /* In the case of multiple errors found on operands related to a
5200 	 single opcode, only record the error of the leftmost operand and
5201 	 only if the error is of higher severity.  */
5202       DEBUG_TRACE ("error %s on operand %d not added to the report due to"
5203 		   " the existing error %s on operand %d",
5204 		   operand_mismatch_kind_names[new_record->detail.kind],
5205 		   new_record->detail.index,
5206 		   operand_mismatch_kind_names[record->detail.kind],
5207 		   record->detail.index);
5208       return;
5209     }
5210 
5211   record->detail = new_record->detail;
5212 }
5213 
5214 static inline void
5215 record_operand_error_info (const aarch64_opcode *opcode,
5216 			   aarch64_operand_error *error_info)
5217 {
5218   operand_error_record record;
5219   record.opcode = opcode;
5220   record.detail = *error_info;
5221   add_operand_error_record (&record);
5222 }
5223 
5224 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5225    error message *ERROR, for operand IDX (count from 0).  */
5226 
5227 static void
5228 record_operand_error (const aarch64_opcode *opcode, int idx,
5229 		      enum aarch64_operand_error_kind kind,
5230 		      const char* error)
5231 {
5232   aarch64_operand_error info;
5233   memset(&info, 0, sizeof (info));
5234   info.index = idx;
5235   info.kind = kind;
5236   info.error = error;
5237   info.non_fatal = false;
5238   record_operand_error_info (opcode, &info);
5239 }
5240 
5241 static void
5242 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5243 				enum aarch64_operand_error_kind kind,
5244 				const char* error, const int *extra_data)
5245 {
5246   aarch64_operand_error info;
5247   info.index = idx;
5248   info.kind = kind;
5249   info.error = error;
5250   info.data[0].i = extra_data[0];
5251   info.data[1].i = extra_data[1];
5252   info.data[2].i = extra_data[2];
5253   info.non_fatal = false;
5254   record_operand_error_info (opcode, &info);
5255 }
5256 
5257 static void
5258 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5259 				   const char* error, int lower_bound,
5260 				   int upper_bound)
5261 {
5262   int data[3] = {lower_bound, upper_bound, 0};
5263   record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5264 				  error, data);
5265 }
5266 
5267 /* Remove the operand error record for *OPCODE.  */
5268 static void ATTRIBUTE_UNUSED
5269 remove_operand_error_record (const aarch64_opcode *opcode)
5270 {
5271   if (opcode_has_operand_error_p (opcode))
5272     {
5273       operand_error_record* record = operand_error_report.head;
5274       gas_assert (record != NULL && operand_error_report.tail != NULL);
5275       operand_error_report.head = record->next;
5276       record->next = free_opnd_error_record_nodes;
5277       free_opnd_error_record_nodes = record;
5278       if (operand_error_report.head == NULL)
5279 	{
5280 	  gas_assert (operand_error_report.tail == record);
5281 	  operand_error_report.tail = NULL;
5282 	}
5283     }
5284 }
5285 
5286 /* Given the instruction in *INSTR, return the index of the best matched
5287    qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5288 
5289    Return -1 if there is no qualifier sequence; return the index of the
5290    first match if multiple matches are found.  */
5291 
5292 static int
5293 find_best_match (const aarch64_inst *instr,
5294 		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5295 {
5296   int i, num_opnds, max_num_matched, idx;
5297 
5298   num_opnds = aarch64_num_of_operands (instr->opcode);
5299   if (num_opnds == 0)
5300     {
5301       DEBUG_TRACE ("no operand");
5302       return -1;
5303     }
5304 
5305   max_num_matched = 0;
5306   idx = 0;
5307 
5308   /* For each pattern.  */
5309   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5310     {
5311       int j, num_matched;
5312       const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5313 
5314       /* Most opcodes have far fewer patterns in the list.  */
5315       if (empty_qualifier_sequence_p (qualifiers))
5316 	{
5317 	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5318 	  break;
5319 	}
5320 
5321       for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5322 	if (*qualifiers == instr->operands[j].qualifier)
5323 	  ++num_matched;
5324 
5325       if (num_matched > max_num_matched)
5326 	{
5327 	  max_num_matched = num_matched;
5328 	  idx = i;
5329 	}
5330     }
5331 
5332   DEBUG_TRACE ("return with %d", idx);
5333   return idx;
5334 }
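/* Worked example (illustrative, using a hypothetical opcode): if the
   qualifier list contains the rows { 4S, 4S, 4S } and { 2D, 2D, 2D } and the
   user wrote "v0.4s, v1.4s, v2.2d", the first row matches two of the parsed
   operand qualifiers while the second matches only one, so find_best_match
   returns the index of the { 4S, 4S, 4S } row.  */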
5335 
5336 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5337    corresponding operands in *INSTR.  */
5338 
5339 static inline void
5340 assign_qualifier_sequence (aarch64_inst *instr,
5341 			   const aarch64_opnd_qualifier_t *qualifiers)
5342 {
5343   int i = 0;
5344   int num_opnds = aarch64_num_of_operands (instr->opcode);
5345   gas_assert (num_opnds);
5346   for (i = 0; i < num_opnds; ++i, ++qualifiers)
5347     instr->operands[i].qualifier = *qualifiers;
5348 }
5349 
5350 /* Print operands for diagnostic purposes.  */
5351 
5352 static void
5353 print_operands (char *buf, const aarch64_opcode *opcode,
5354 		const aarch64_opnd_info *opnds)
5355 {
5356   int i;
5357 
5358   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5359     {
5360       char str[128];
5361       char cmt[128];
5362 
5363       /* We rely primarily on the opcode's operand info, but we also look
5364 	 into inst->operands so that an optional operand can still be
5365 	 printed.
5366 	 The two operand codes should be the same in all cases, apart from
5367 	 when the operand is optional.  */
5368       if (opcode->operands[i] == AARCH64_OPND_NIL
5369 	  || opnds[i].type == AARCH64_OPND_NIL)
5370 	break;
5371 
5372       /* Generate the operand string in STR.  */
5373       aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5374 			     NULL, cmt, sizeof (cmt), cpu_variant);
5375 
5376       /* Delimiter.  */
5377       if (str[0] != '\0')
5378 	strcat (buf, i == 0 ? " " : ", ");
5379 
5380       /* Append the operand string.  */
5381       strcat (buf, str);
5382 
5383       /* Append a comment.  This works because only the last operand ever
5384 	 adds a comment.  If that ever changes then we'll need to be
5385 	 smarter here.  */
5386       if (cmt[0] != '\0')
5387 	{
5388 	  strcat (buf, "\t// ");
5389 	  strcat (buf, cmt);
5390 	}
5391     }
5392 }
5393 
5394 /* Send to stderr a string as information.  */
5395 
5396 static void
5397 output_info (const char *format, ...)
5398 {
5399   const char *file;
5400   unsigned int line;
5401   va_list args;
5402 
5403   file = as_where (&line);
5404   if (file)
5405     {
5406       if (line != 0)
5407 	fprintf (stderr, "%s:%u: ", file, line);
5408       else
5409 	fprintf (stderr, "%s: ", file);
5410     }
5411   fprintf (stderr, _("Info: "));
5412   va_start (args, format);
5413   vfprintf (stderr, format, args);
5414   va_end (args);
5415   (void) putc ('\n', stderr);
5416 }
5417 
5418 /* Output one operand error record.  */
5419 
5420 static void
5421 output_operand_error_record (const operand_error_record *record, char *str)
5422 {
5423   const aarch64_operand_error *detail = &record->detail;
5424   int idx = detail->index;
5425   const aarch64_opcode *opcode = record->opcode;
5426   enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5427 				: AARCH64_OPND_NIL);
5428 
5429   typedef void (*handler_t)(const char *format, ...);
5430   handler_t handler = detail->non_fatal ? as_warn : as_bad;
5431 
5432   switch (detail->kind)
5433     {
5434     case AARCH64_OPDE_NIL:
5435       gas_assert (0);
5436       break;
5437 
5438     case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5439       handler (_("this `%s' should have an immediately preceding `%s'"
5440 		 " -- `%s'"),
5441 	       detail->data[0].s, detail->data[1].s, str);
5442       break;
5443 
5444     case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5445       handler (_("the preceding `%s' should be followed by `%s` rather"
5446 		 " than `%s` -- `%s'"),
5447 	       detail->data[1].s, detail->data[0].s, opcode->name, str);
5448       break;
5449 
5450     case AARCH64_OPDE_SYNTAX_ERROR:
5451     case AARCH64_OPDE_RECOVERABLE:
5452     case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5453     case AARCH64_OPDE_OTHER_ERROR:
5454       /* Use the prepared error message if there is one; otherwise use the
5455 	 operand description string to describe the error.  */
5456       if (detail->error != NULL)
5457 	{
5458 	  if (idx < 0)
5459 	    handler (_("%s -- `%s'"), detail->error, str);
5460 	  else
5461 	    handler (_("%s at operand %d -- `%s'"),
5462 		     detail->error, idx + 1, str);
5463 	}
5464       else
5465 	{
5466 	  gas_assert (idx >= 0);
5467 	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
5468 		   aarch64_get_operand_desc (opd_code), str);
5469 	}
5470       break;
5471 
5472     case AARCH64_OPDE_INVALID_VARIANT:
5473       handler (_("operand mismatch -- `%s'"), str);
5474       if (verbose_error_p)
5475 	{
5476 	  /* We will try to correct the erroneous instruction and also provide
5477 	     more information e.g. all other valid variants.
5478 
5479 	     The string representation of the corrected instruction and other
5480 	     valid variants are generated by
5481 
5482 	     1) obtaining the intermediate representation of the erroneous
5483 	     instruction;
5484 	     2) manipulating the IR, e.g. replacing the operand qualifier;
5485 	     3) printing out the instruction by calling the printer functions
5486 	     shared with the disassembler.
5487 
5488 	     The limitation of this method is that the exact input assembly
5489 	     line cannot be accurately reproduced in some cases, for example an
5490 	     optional operand present in the actual assembly line will be
5491 	     omitted in the output; likewise for the optional syntax rules,
5492 	     e.g. the # before the immediate.  Another limitation is that the
5493 	     assembly symbols and relocation operations in the assembly line
5494 	     currently cannot be printed out in the error report.  Last but not
5495 	     least, when other errors co-exist with this error, the
5496 	     'corrected' instruction may still be incorrect, e.g. given
5497 	       'ldnp h0,h1,[x0,#6]!'
5498 	     this diagnosis will provide the version:
5499 	       'ldnp s0,s1,[x0,#6]!'
5500 	     which is still not right.  */
5501 	  size_t len = strlen (get_mnemonic_name (str));
5502 	  int i, qlf_idx;
5503 	  bool result;
5504 	  char buf[2048];
5505 	  aarch64_inst *inst_base = &inst.base;
5506 	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5507 
5508 	  /* Init inst.  */
5509 	  reset_aarch64_instruction (&inst);
5510 	  inst_base->opcode = opcode;
5511 
5512 	  /* Reset the error report so that there is no side effect on the
5513 	     following operand parsing.  */
5514 	  init_operand_error_report ();
5515 
5516 	  /* Fill inst.  */
5517 	  result = parse_operands (str + len, opcode)
5518 	    && programmer_friendly_fixup (&inst);
5519 	  gas_assert (result);
5520 	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5521 					  NULL, NULL, insn_sequence);
5522 	  gas_assert (!result);
5523 
5524 	  /* Find the most matched qualifier sequence.  */
5525 	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5526 	  gas_assert (qlf_idx > -1);
5527 
5528 	  /* Assign the qualifiers.  */
5529 	  assign_qualifier_sequence (inst_base,
5530 				     opcode->qualifiers_list[qlf_idx]);
5531 
5532 	  /* Print the hint.  */
5533 	  output_info (_("   did you mean this?"));
5534 	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5535 	  print_operands (buf, opcode, inst_base->operands);
5536 	  output_info (_("   %s"), buf);
5537 
5538 	  /* Print out other variant(s) if there is any.  */
5539 	  if (qlf_idx != 0 ||
5540 	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5541 	    output_info (_("   other valid variant(s):"));
5542 
5543 	  /* For each pattern.  */
5544 	  qualifiers_list = opcode->qualifiers_list;
5545 	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5546 	    {
5547 	      /* Most opcodes have far fewer patterns in the list.
5548 		 The first NIL qualifier indicates the end of the list.  */
5549 	      if (empty_qualifier_sequence_p (*qualifiers_list))
5550 		break;
5551 
5552 	      if (i != qlf_idx)
5553 		{
5554 		  /* Mnemonics name.  */
5555 		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5556 
5557 		  /* Assign the qualifiers.  */
5558 		  assign_qualifier_sequence (inst_base, *qualifiers_list);
5559 
5560 		  /* Print instruction.  */
5561 		  print_operands (buf, opcode, inst_base->operands);
5562 
5563 		  output_info (_("   %s"), buf);
5564 		}
5565 	    }
5566 	}
5567       break;
5568 
5569     case AARCH64_OPDE_UNTIED_IMMS:
5570       handler (_("operand %d must have the same immediate value "
5571                  "as operand 1 -- `%s'"),
5572                detail->index + 1, str);
5573       break;
5574 
5575     case AARCH64_OPDE_UNTIED_OPERAND:
5576       handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5577                detail->index + 1, str);
5578       break;
5579 
5580     case AARCH64_OPDE_OUT_OF_RANGE:
5581       if (detail->data[0].i != detail->data[1].i)
5582 	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5583 		 detail->error ? detail->error : _("immediate value"),
5584 		 detail->data[0].i, detail->data[1].i, idx + 1, str);
5585       else
5586 	handler (_("%s must be %d at operand %d -- `%s'"),
5587 		 detail->error ? detail->error : _("immediate value"),
5588 		 detail->data[0].i, idx + 1, str);
5589       break;
5590 
5591     case AARCH64_OPDE_REG_LIST:
5592       if (detail->data[0].i == 1)
5593 	handler (_("invalid number of registers in the list; "
5594 		   "only 1 register is expected at operand %d -- `%s'"),
5595 		 idx + 1, str);
5596       else
5597 	handler (_("invalid number of registers in the list; "
5598 		   "%d registers are expected at operand %d -- `%s'"),
5599 	       detail->data[0].i, idx + 1, str);
5600       break;
5601 
5602     case AARCH64_OPDE_UNALIGNED:
5603       handler (_("immediate value must be a multiple of "
5604 		 "%d at operand %d -- `%s'"),
5605 	       detail->data[0].i, idx + 1, str);
5606       break;
5607 
5608     default:
5609       gas_assert (0);
5610       break;
5611     }
5612 }
5613 
5614 /* Process and output the error message about an operand mismatch.
5615 
5616    When this function is called, the operand error information has
5617    been collected for an assembly line and there will be multiple
5618    errors in the case of multiple instruction templates; output the
5619    error message that most closely describes the problem.
5620 
5621    The errors to be printed can be filtered to print either all errors
5622    or only the non-fatal ones.  This distinction has to be made because
5623    the error buffer may already be filled with fatal errors that we don't
5624    want to print due to the different instruction templates.  */
5625 
5626 static void
5627 output_operand_error_report (char *str, bool non_fatal_only)
5628 {
5629   int largest_error_pos;
5630   const char *msg = NULL;
5631   enum aarch64_operand_error_kind kind;
5632   operand_error_record *curr;
5633   operand_error_record *head = operand_error_report.head;
5634   operand_error_record *record = NULL;
5635 
5636   /* No error to report.  */
5637   if (head == NULL)
5638     return;
5639 
5640   gas_assert (head != NULL && operand_error_report.tail != NULL);
5641 
5642   /* Only one error.  */
5643   if (head == operand_error_report.tail)
5644     {
5645       /* If the only error is a non-fatal one and we don't want to print it,
5646 	 just exit.  */
5647       if (!non_fatal_only || head->detail.non_fatal)
5648 	{
5649 	  DEBUG_TRACE ("single opcode entry with error kind: %s",
5650 		       operand_mismatch_kind_names[head->detail.kind]);
5651 	  output_operand_error_record (head, str);
5652 	}
5653       return;
5654     }
5655 
5656   /* Find the error kind of the highest severity.  */
5657   DEBUG_TRACE ("multiple opcode entries with error kind");
5658   kind = AARCH64_OPDE_NIL;
5659   for (curr = head; curr != NULL; curr = curr->next)
5660     {
5661       gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5662       DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5663       if (operand_error_higher_severity_p (curr->detail.kind, kind)
5664 	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5665 	kind = curr->detail.kind;
5666     }
5667 
5668   gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5669 
5670   /* Pick up one of errors of KIND to report.  */
5671   largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
5672   for (curr = head; curr != NULL; curr = curr->next)
5673     {
5674       /* If we don't want to print non-fatal errors then don't consider them
5675 	 at all.  */
5676       if (curr->detail.kind != kind
5677 	  || (non_fatal_only && !curr->detail.non_fatal))
5678 	continue;
5679       /* If there are multiple errors, pick up the one with the highest
5680 	 mismatching operand index.  In the case of multiple errors with
5681 	 the equally highest operand index, pick up the first one or the
5682 	 first one with non-NULL error message.  */
5683       if (curr->detail.index > largest_error_pos
5684 	  || (curr->detail.index == largest_error_pos && msg == NULL
5685 	      && curr->detail.error != NULL))
5686 	{
5687 	  largest_error_pos = curr->detail.index;
5688 	  record = curr;
5689 	  msg = record->detail.error;
5690 	}
5691     }
5692 
5693   /* The way errors are collected in the back-end is a bit non-intuitive.  But
5694      essentially, because each operand template is tried recursively you may
5695      always have errors collected from the previously tried OPND.  These are
5696      usually skipped if there is one successful match.  However now with the
5697      non-fatal errors we have to ignore those previously collected hard errors
5698      when we're only interested in printing the non-fatal ones.  This condition
5699      prevents us from printing errors that are not appropriate, since we did
5700      match a template, but that template also has warnings it wants to print.  */
5701   if (non_fatal_only && !record)
5702     return;
5703 
5704   gas_assert (largest_error_pos != -2 && record != NULL);
5705   DEBUG_TRACE ("Pick up error kind %s to report",
5706 	       operand_mismatch_kind_names[record->detail.kind]);
5707 
5708   /* Output.  */
5709   output_operand_error_record (record, str);
5710 }
5711 
5712 /* Write an AARCH64 instruction to buf - always little-endian.  */
5713 static void
5714 put_aarch64_insn (char *buf, uint32_t insn)
5715 {
5716   unsigned char *where = (unsigned char *) buf;
5717   where[0] = insn;
5718   where[1] = insn >> 8;
5719   where[2] = insn >> 16;
5720   where[3] = insn >> 24;
5721 }
5722 
5723 static uint32_t
5724 get_aarch64_insn (char *buf)
5725 {
5726   unsigned char *where = (unsigned char *) buf;
5727   uint32_t result;
5728   result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5729 	     | ((uint32_t) where[3] << 24)));
5730   return result;
5731 }
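/* Illustrative sketch (not part of the original source): a standalone
   demonstration of the little-endian layout used by put_aarch64_insn and
   get_aarch64_insn above.  The encoding 0xd503201f (NOP) is stored as the
   byte sequence 1f 20 03 d5 regardless of host endianness.

     #include <assert.h>
     #include <stdint.h>

     int
     main (void)
     {
       unsigned char buf[4];
       uint32_t insn = 0xd503201f;	// NOP
       buf[0] = insn;
       buf[1] = insn >> 8;
       buf[2] = insn >> 16;
       buf[3] = insn >> 24;
       assert (buf[0] == 0x1f && buf[3] == 0xd5);
       uint32_t back = (buf[0] | (buf[1] << 8) | (buf[2] << 16)
			| ((uint32_t) buf[3] << 24));
       assert (back == insn);
       return 0;
     }
*/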
5732 
5733 static void
5734 output_inst (struct aarch64_inst *new_inst)
5735 {
5736   char *to = NULL;
5737 
5738   to = frag_more (INSN_SIZE);
5739 
5740   frag_now->tc_frag_data.recorded = 1;
5741 
5742   put_aarch64_insn (to, inst.base.value);
5743 
5744   if (inst.reloc.type != BFD_RELOC_UNUSED)
5745     {
5746       fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5747 				    INSN_SIZE, &inst.reloc.exp,
5748 				    inst.reloc.pc_rel,
5749 				    inst.reloc.type);
5750       DEBUG_TRACE ("Prepared relocation fix up");
5751       /* Don't check the addend value against the instruction size;
5752          that's the job of our code in md_apply_fix().  */
5753       fixp->fx_no_overflow = 1;
5754       if (new_inst != NULL)
5755 	fixp->tc_fix_data.inst = new_inst;
5756       if (aarch64_gas_internal_fixup_p ())
5757 	{
5758 	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5759 	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
5760 	  fixp->fx_addnumber = inst.reloc.flags;
5761 	}
5762     }
5763 
5764   dwarf2_emit_insn (INSN_SIZE);
5765 }
5766 
5767 /* Link together opcodes of the same name.  */
5768 
5769 struct templates
5770 {
5771   const aarch64_opcode *opcode;
5772   struct templates *next;
5773 };
5774 
5775 typedef struct templates templates;
5776 
5777 static templates *
5778 lookup_mnemonic (const char *start, int len)
5779 {
5780   templates *templ = NULL;
5781 
5782   templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5783   return templ;
5784 }
5785 
5786 /* Subroutine of md_assemble, responsible for looking up the primary
5787    opcode from the mnemonic the user wrote.  BASE points to the beginning
5788    of the mnemonic, DOT points to the first '.' within the mnemonic
5789    (if any) and END points to the end of the mnemonic.  */
5790 
5791 static templates *
5792 opcode_lookup (char *base, char *dot, char *end)
5793 {
5794   const aarch64_cond *cond;
5795   char condname[16];
5796   int len;
5797 
5798   if (dot == end)
5799     return 0;
5800 
5801   inst.cond = COND_ALWAYS;
5802 
5803   /* Handle a possible condition.  */
5804   if (dot)
5805     {
5806       cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5807       if (!cond)
5808 	return 0;
5809       inst.cond = cond->value;
5810       len = dot - base;
5811     }
5812   else
5813     len = end - base;
5814 
5815   if (inst.cond == COND_ALWAYS)
5816     {
5817       /* Look for unaffixed mnemonic.  */
5818       return lookup_mnemonic (base, len);
5819     }
5820   else if (len <= 13)
5821     {
5822       /* Append ".c" to the mnemonic if conditional.  */
5823       memcpy (condname, base, len);
5824       memcpy (condname + len, ".c", 2);
5825       base = condname;
5826       len += 2;
5827       return lookup_mnemonic (base, len);
5828     }
5829 
5830   return NULL;
5831 }
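/* Worked example (illustrative): for the conditional branch "b.eq", DOT
   points at the '.', so the condition "eq" is looked up in aarch64_cond_hsh
   and LEN becomes 1.  Since the condition is not COND_ALWAYS, ".c" is
   appended to the base mnemonic and the hash lookup is done with the key
   "b.c", the form under which conditional mnemonics are expected to be
   stored in aarch64_ops_hsh.  */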
5832 
5833 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5834    to a corresponding operand qualifier.  */
5835 
5836 static inline aarch64_opnd_qualifier_t
5837 vectype_to_qualifier (const struct vector_type_el *vectype)
5838 {
5839   /* Element size in bytes indexed by vector_el_type.  */
5840   const unsigned char ele_size[5]
5841     = {1, 2, 4, 8, 16};
5842   const unsigned int ele_base [5] =
5843     {
5844       AARCH64_OPND_QLF_V_4B,
5845       AARCH64_OPND_QLF_V_2H,
5846       AARCH64_OPND_QLF_V_2S,
5847       AARCH64_OPND_QLF_V_1D,
5848       AARCH64_OPND_QLF_V_1Q
5849   };
5850 
5851   if (!vectype->defined || vectype->type == NT_invtype)
5852     goto vectype_conversion_fail;
5853 
5854   if (vectype->type == NT_zero)
5855     return AARCH64_OPND_QLF_P_Z;
5856   if (vectype->type == NT_merge)
5857     return AARCH64_OPND_QLF_P_M;
5858 
5859   gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5860 
5861   if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5862     {
5863       /* Special case S_4B.  */
5864       if (vectype->type == NT_b && vectype->width == 4)
5865 	return AARCH64_OPND_QLF_S_4B;
5866 
5867       /* Special case S_2H.  */
5868       if (vectype->type == NT_h && vectype->width == 2)
5869 	return AARCH64_OPND_QLF_S_2H;
5870 
5871       /* Vector element register.  */
5872       return AARCH64_OPND_QLF_S_B + vectype->type;
5873     }
5874   else
5875     {
5876       /* Vector register.  */
5877       int reg_size = ele_size[vectype->type] * vectype->width;
5878       unsigned offset;
5879       unsigned shift;
5880       if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5881 	goto vectype_conversion_fail;
5882 
5883       /* The conversion is done by calculating the offset from the base
5884 	 operand qualifier for the vector type.  The operand qualifiers are
5885 	 regular enough that the offset can be established by shifting the
5886 	 vector width by a vector-type dependent amount.  */
5887       shift = 0;
5888       if (vectype->type == NT_b)
5889 	shift = 3;
5890       else if (vectype->type == NT_h || vectype->type == NT_s)
5891 	shift = 2;
5892       else if (vectype->type >= NT_d)
5893 	shift = 1;
5894       else
5895 	gas_assert (0);
5896 
5897       offset = ele_base [vectype->type] + (vectype->width >> shift);
5898       gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5899 		  && offset <= AARCH64_OPND_QLF_V_1Q);
5900       return offset;
5901     }
5902 
5903  vectype_conversion_fail:
5904   first_error (_("bad vector arrangement type"));
5905   return AARCH64_OPND_QLF_NIL;
5906 }
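/* Worked example (illustrative): for the arrangement "8b", type == NT_b and
   width == 8, so reg_size = 1 * 8 = 8 (valid), shift = 3 and
   offset = AARCH64_OPND_QLF_V_4B + (8 >> 3), i.e. the qualifier one past
   _V_4B (which should be AARCH64_OPND_QLF_V_8B).  This relies on the _V_*
   qualifiers being laid out contiguously from _V_4B to _V_1Q, as the
   gas_assert above requires.  */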
5907 
5908 /* Process an optional operand that has been omitted from the assembly line.
5909    Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
5910    instruction's opcode entry while IDX is the index of this omitted
5911    operand.  */
5912 
5913 static void
5914 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5915 			 int idx, aarch64_opnd_info *operand)
5916 {
5917   aarch64_insn default_value = get_optional_operand_default_value (opcode);
5918   gas_assert (optional_operand_p (opcode, idx));
5919   gas_assert (!operand->present);
5920 
5921   switch (type)
5922     {
5923     case AARCH64_OPND_Rd:
5924     case AARCH64_OPND_Rn:
5925     case AARCH64_OPND_Rm:
5926     case AARCH64_OPND_Rt:
5927     case AARCH64_OPND_Rt2:
5928     case AARCH64_OPND_Rt_LS64:
5929     case AARCH64_OPND_Rt_SP:
5930     case AARCH64_OPND_Rs:
5931     case AARCH64_OPND_Ra:
5932     case AARCH64_OPND_Rt_SYS:
5933     case AARCH64_OPND_Rd_SP:
5934     case AARCH64_OPND_Rn_SP:
5935     case AARCH64_OPND_Rm_SP:
5936     case AARCH64_OPND_Fd:
5937     case AARCH64_OPND_Fn:
5938     case AARCH64_OPND_Fm:
5939     case AARCH64_OPND_Fa:
5940     case AARCH64_OPND_Ft:
5941     case AARCH64_OPND_Ft2:
5942     case AARCH64_OPND_Sd:
5943     case AARCH64_OPND_Sn:
5944     case AARCH64_OPND_Sm:
5945     case AARCH64_OPND_Va:
5946     case AARCH64_OPND_Vd:
5947     case AARCH64_OPND_Vn:
5948     case AARCH64_OPND_Vm:
5949     case AARCH64_OPND_VdD1:
5950     case AARCH64_OPND_VnD1:
5951       operand->reg.regno = default_value;
5952       break;
5953 
5954     case AARCH64_OPND_Ed:
5955     case AARCH64_OPND_En:
5956     case AARCH64_OPND_Em:
5957     case AARCH64_OPND_Em16:
5958     case AARCH64_OPND_SM3_IMM2:
5959       operand->reglane.regno = default_value;
5960       break;
5961 
5962     case AARCH64_OPND_IDX:
5963     case AARCH64_OPND_BIT_NUM:
5964     case AARCH64_OPND_IMMR:
5965     case AARCH64_OPND_IMMS:
5966     case AARCH64_OPND_SHLL_IMM:
5967     case AARCH64_OPND_IMM_VLSL:
5968     case AARCH64_OPND_IMM_VLSR:
5969     case AARCH64_OPND_CCMP_IMM:
5970     case AARCH64_OPND_FBITS:
5971     case AARCH64_OPND_UIMM4:
5972     case AARCH64_OPND_UIMM3_OP1:
5973     case AARCH64_OPND_UIMM3_OP2:
5974     case AARCH64_OPND_IMM:
5975     case AARCH64_OPND_IMM_2:
5976     case AARCH64_OPND_WIDTH:
5977     case AARCH64_OPND_UIMM7:
5978     case AARCH64_OPND_NZCV:
5979     case AARCH64_OPND_SVE_PATTERN:
5980     case AARCH64_OPND_SVE_PRFOP:
5981       operand->imm.value = default_value;
5982       break;
5983 
5984     case AARCH64_OPND_SVE_PATTERN_SCALED:
5985       operand->imm.value = default_value;
5986       operand->shifter.kind = AARCH64_MOD_MUL;
5987       operand->shifter.amount = 1;
5988       break;
5989 
5990     case AARCH64_OPND_EXCEPTION:
5991       inst.reloc.type = BFD_RELOC_UNUSED;
5992       break;
5993 
5994     case AARCH64_OPND_BARRIER_ISB:
5995       operand->barrier = aarch64_barrier_options + default_value;
5996       break;
5997 
5998     case AARCH64_OPND_BTI_TARGET:
5999       operand->hint_option = aarch64_hint_options + default_value;
6000       break;
6001 
6002     default:
6003       break;
6004     }
6005 }
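/* Illustrative example: for "ret" written without an operand, the optional
   Rn operand is filled in here from get_optional_operand_default_value,
   which for RET should yield register number 30 (the link register), so the
   instruction assembles as if "ret x30" had been written.  */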
6006 
6007 /* Process the relocation type for move wide instructions.
6008    Return TRUE on success; otherwise return FALSE.  */
6009 
6010 static bool
6011 process_movw_reloc_info (void)
6012 {
6013   int is32;
6014   unsigned shift;
6015 
6016   is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
6017 
6018   if (inst.base.opcode->op == OP_MOVK)
6019     switch (inst.reloc.type)
6020       {
6021       case BFD_RELOC_AARCH64_MOVW_G0_S:
6022       case BFD_RELOC_AARCH64_MOVW_G1_S:
6023       case BFD_RELOC_AARCH64_MOVW_G2_S:
6024       case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6025       case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6026       case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6027       case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6028       case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6029       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6030       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6031       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6032 	set_syntax_error
6033 	  (_("the specified relocation type is not allowed for MOVK"));
6034 	return false;
6035       default:
6036 	break;
6037       }
6038 
6039   switch (inst.reloc.type)
6040     {
6041     case BFD_RELOC_AARCH64_MOVW_G0:
6042     case BFD_RELOC_AARCH64_MOVW_G0_NC:
6043     case BFD_RELOC_AARCH64_MOVW_G0_S:
6044     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6045     case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6046     case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6047     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6048     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6049     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6050     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6051     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6052     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6053     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6054       shift = 0;
6055       break;
6056     case BFD_RELOC_AARCH64_MOVW_G1:
6057     case BFD_RELOC_AARCH64_MOVW_G1_NC:
6058     case BFD_RELOC_AARCH64_MOVW_G1_S:
6059     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6060     case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6061     case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6062     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6063     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6064     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6065     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6066     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6067     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6068     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6069       shift = 16;
6070       break;
6071     case BFD_RELOC_AARCH64_MOVW_G2:
6072     case BFD_RELOC_AARCH64_MOVW_G2_NC:
6073     case BFD_RELOC_AARCH64_MOVW_G2_S:
6074     case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6075     case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6076     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6077     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6078       if (is32)
6079 	{
6080 	  set_fatal_syntax_error
6081 	    (_("the specified relocation type is not allowed for 32-bit "
6082 	       "register"));
6083 	  return false;
6084 	}
6085       shift = 32;
6086       break;
6087     case BFD_RELOC_AARCH64_MOVW_G3:
6088     case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6089       if (is32)
6090 	{
6091 	  set_fatal_syntax_error
6092 	    (_("the specified relocation type is not allowed for 32-bit "
6093 	       "register"));
6094 	  return false;
6095 	}
6096       shift = 48;
6097       break;
6098     default:
6099       /* More cases should be added when more MOVW-related relocation types
6100          are supported in GAS.  */
6101       gas_assert (aarch64_gas_internal_fixup_p ());
6102       /* The shift amount should have already been set by the parser.  */
6103       return true;
6104     }
6105   inst.base.operands[1].shifter.amount = shift;
6106   return true;
6107 }
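/* Worked example (illustrative): for "movz x0, #:abs_g1:sym" the parser sets
   inst.reloc.type to BFD_RELOC_AARCH64_MOVW_G1, so the switch above selects
   shift = 16 and the shifter on operand 1 becomes LSL #16, matching the
   16-bit group that the relocation addresses.  */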
6108 
6109 /* A primitive log calculator.  */
6110 
6111 static inline unsigned int
6112 get_logsz (unsigned int size)
6113 {
6114   const unsigned char ls[16] =
6115     {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
6116   if (size > 16)
6117     {
6118       gas_assert (0);
6119       return -1;
6120     }
6121   gas_assert (ls[size - 1] != (unsigned char)-1);
6122   return ls[size - 1];
6123 }
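/* Illustrative check (not part of the original source): the lookup table
   above gives get_logsz (1) == 0, get_logsz (2) == 1, get_logsz (4) == 2,
   get_logsz (8) == 3 and get_logsz (16) == 4; any other size in range trips
   the gas_assert because the table entry is (unsigned char) -1.  */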
6124 
6125 /* Determine and return the real reloc type code for an instruction
6126    with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */
6127 
6128 static inline bfd_reloc_code_real_type
6129 ldst_lo12_determine_real_reloc_type (void)
6130 {
6131   unsigned logsz, max_logsz;
6132   enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
6133   enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
6134 
6135   const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
6136     {
6137       BFD_RELOC_AARCH64_LDST8_LO12,
6138       BFD_RELOC_AARCH64_LDST16_LO12,
6139       BFD_RELOC_AARCH64_LDST32_LO12,
6140       BFD_RELOC_AARCH64_LDST64_LO12,
6141       BFD_RELOC_AARCH64_LDST128_LO12
6142     },
6143     {
6144       BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
6145       BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
6146       BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
6147       BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
6148       BFD_RELOC_AARCH64_NONE
6149     },
6150     {
6151       BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
6152       BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
6153       BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
6154       BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
6155       BFD_RELOC_AARCH64_NONE
6156     },
6157     {
6158       BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
6159       BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
6160       BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
6161       BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
6162       BFD_RELOC_AARCH64_NONE
6163     },
6164     {
6165       BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
6166       BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
6167       BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
6168       BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
6169       BFD_RELOC_AARCH64_NONE
6170     }
6171   };
6172 
6173   gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6174 	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6175 	      || (inst.reloc.type
6176 		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6177 	      || (inst.reloc.type
6178 		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6179 	      || (inst.reloc.type
6180 		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
6181   gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
6182 
6183   if (opd1_qlf == AARCH64_OPND_QLF_NIL)
6184     opd1_qlf =
6185       aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
6186 				      1, opd0_qlf, 0);
6187   gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
6188 
6189   logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
6190 
6191   if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6192       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
6193       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
6194       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
6195     max_logsz = 3;
6196   else
6197     max_logsz = 4;
6198 
6199   if (logsz > max_logsz)
6200     {
6201       /* See PR 27904 for an example of this.  */
6202       set_fatal_syntax_error
6203 	(_("relocation qualifier does not match instruction size"));
6204       return BFD_RELOC_AARCH64_NONE;
6205     }
6206 
6207   /* In reloc.c, these pseudo relocation types should be defined in the same
6208      order as the rows of the reloc_ldst_lo12 array above, because the array
6209      index calculation below relies on this.  */
6210   return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
6211 }
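/* Worked example (illustrative): for "ldr x0, [x1, #:lo12:sym]" the pseudo
   reloc type is BFD_RELOC_AARCH64_LDST_LO12 (row 0 of the table above) and
   the expected address qualifier for a 64-bit transfer has an element size
   of 8 bytes, so logsz == 3 and the function returns
   BFD_RELOC_AARCH64_LDST64_LO12.  */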
6212 
6213 /* Check whether a register list REGINFO is valid.  The registers must be
6214    numbered in increasing order (modulo 32), in increments of one or two.
6215 
6216    If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6217    increments of two.
6218 
6219    Return FALSE if such a register list is invalid, otherwise return TRUE.  */
6220 
6221 static bool
6222 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
6223 {
6224   uint32_t i, nb_regs, prev_regno, incr;
6225 
6226   nb_regs = 1 + (reginfo & 0x3);
6227   reginfo >>= 2;
6228   prev_regno = reginfo & 0x1f;
6229   incr = accept_alternate ? 2 : 1;
6230 
6231   for (i = 1; i < nb_regs; ++i)
6232     {
6233       uint32_t curr_regno;
6234       reginfo >>= 5;
6235       curr_regno = reginfo & 0x1f;
6236       if (curr_regno != ((prev_regno + incr) & 0x1f))
6237 	return false;
6238       prev_regno = curr_regno;
6239     }
6240 
6241   return true;
6242 }
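/* Illustrative sketch of the REGINFO packing assumed above (and produced by
   parse_vector_reg_list): bits [1:0] hold the register count minus one and
   each subsequent 5-bit field holds a register number, lowest field first.
   For the list { v1, v2, v3 } that is

     reginfo = 2 | (1 << 2) | (2 << 7) | (3 << 12);

   which reg_list_valid_p (reginfo, 0) accepts, while { v1, v3, v5 } is only
   accepted when ACCEPT_ALTERNATE is non-zero.  */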
6243 
6244 /* Generic instruction operand parser.	This does no encoding and no
6245    semantic validation; it merely squirrels values away in the inst
6246    structure.  Returns TRUE or FALSE depending on whether the
6247    specified grammar matched.  */
6248 
6249 static bool
6250 parse_operands (char *str, const aarch64_opcode *opcode)
6251 {
6252   int i;
6253   char *backtrack_pos = 0;
6254   const enum aarch64_opnd *operands = opcode->operands;
6255   aarch64_reg_type imm_reg_type;
6256 
6257   clear_error ();
6258   skip_whitespace (str);
6259 
6260   if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6261     imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6262   else
6263     imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6264 
6265   for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6266     {
6267       int64_t val;
6268       const reg_entry *reg;
6269       int comma_skipped_p = 0;
6270       aarch64_reg_type rtype;
6271       struct vector_type_el vectype;
6272       aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6273       aarch64_opnd_info *info = &inst.base.operands[i];
6274       aarch64_reg_type reg_type;
6275 
6276       DEBUG_TRACE ("parse operand %d", i);
6277 
6278       /* Assign the operand code.  */
6279       info->type = operands[i];
6280 
6281       if (optional_operand_p (opcode, i))
6282 	{
6283 	  /* Remember where we are in case we need to backtrack.  */
6284 	  gas_assert (!backtrack_pos);
6285 	  backtrack_pos = str;
6286 	}
6287 
6288       /* Expect comma between operands; the backtrack mechanism will take
6289 	 care of cases of omitted optional operand.  */
6290       if (i > 0 && ! skip_past_char (&str, ','))
6291 	{
6292 	  set_syntax_error (_("comma expected between operands"));
6293 	  goto failure;
6294 	}
6295       else
6296 	comma_skipped_p = 1;
6297 
6298       switch (operands[i])
6299 	{
6300 	case AARCH64_OPND_Rd:
6301 	case AARCH64_OPND_Rn:
6302 	case AARCH64_OPND_Rm:
6303 	case AARCH64_OPND_Rt:
6304 	case AARCH64_OPND_Rt2:
6305 	case AARCH64_OPND_Rs:
6306 	case AARCH64_OPND_Ra:
6307 	case AARCH64_OPND_Rt_LS64:
6308 	case AARCH64_OPND_Rt_SYS:
6309 	case AARCH64_OPND_PAIRREG:
6310 	case AARCH64_OPND_SVE_Rm:
6311 	  po_int_reg_or_fail (REG_TYPE_R_Z);
6312 
6313 	  /* In LS64 load/store instructions the Rt register number must be
6314 	     even and <= 22.  */
6315 	  if (operands[i] == AARCH64_OPND_Rt_LS64)
6316 	  {
6317 	    /* We've already checked that this is a valid register.
6318 	       This checks that the register number (Rt) is not UNDEFINED for
6319 	       LS64 instructions:
6320 	       if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED.  */
6321 	    if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6322 	    {
6323 	      set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6324 	      goto failure;
6325 	    }
6326 	  }
6327 	  break;
6328 
6329 	case AARCH64_OPND_Rd_SP:
6330 	case AARCH64_OPND_Rn_SP:
6331 	case AARCH64_OPND_Rt_SP:
6332 	case AARCH64_OPND_SVE_Rn_SP:
6333 	case AARCH64_OPND_Rm_SP:
6334 	  po_int_reg_or_fail (REG_TYPE_R_SP);
6335 	  break;
6336 
6337 	case AARCH64_OPND_Rm_EXT:
6338 	case AARCH64_OPND_Rm_SFT:
6339 	  po_misc_or_fail (parse_shifter_operand
6340 			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6341 					 ? SHIFTED_ARITH_IMM
6342 					 : SHIFTED_LOGIC_IMM)));
6343 	  if (!info->shifter.operator_present)
6344 	    {
6345 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6346 		 kind to be explicit.  */
6347 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6348 	      info->shifter.kind = AARCH64_MOD_LSL;
6349 	      /* For Rm_EXT, libopcodes will carry out further check on whether
6350 		 or not stack pointer is used in the instruction (Recall that
6351 		 "the extend operator is not optional unless at least one of
6352 		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
6353 	    }
6354 	  break;
6355 
6356 	case AARCH64_OPND_Fd:
6357 	case AARCH64_OPND_Fn:
6358 	case AARCH64_OPND_Fm:
6359 	case AARCH64_OPND_Fa:
6360 	case AARCH64_OPND_Ft:
6361 	case AARCH64_OPND_Ft2:
6362 	case AARCH64_OPND_Sd:
6363 	case AARCH64_OPND_Sn:
6364 	case AARCH64_OPND_Sm:
6365 	case AARCH64_OPND_SVE_VZn:
6366 	case AARCH64_OPND_SVE_Vd:
6367 	case AARCH64_OPND_SVE_Vm:
6368 	case AARCH64_OPND_SVE_Vn:
6369 	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6370 	  if (val == PARSE_FAIL)
6371 	    {
6372 	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6373 	      goto failure;
6374 	    }
6375 	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6376 
6377 	  info->reg.regno = val;
6378 	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6379 	  break;
6380 
6381 	case AARCH64_OPND_SVE_Pd:
6382 	case AARCH64_OPND_SVE_Pg3:
6383 	case AARCH64_OPND_SVE_Pg4_5:
6384 	case AARCH64_OPND_SVE_Pg4_10:
6385 	case AARCH64_OPND_SVE_Pg4_16:
6386 	case AARCH64_OPND_SVE_Pm:
6387 	case AARCH64_OPND_SVE_Pn:
6388 	case AARCH64_OPND_SVE_Pt:
6389 	case AARCH64_OPND_SME_Pm:
6390 	  reg_type = REG_TYPE_PN;
6391 	  goto vector_reg;
6392 
6393 	case AARCH64_OPND_SVE_Za_5:
6394 	case AARCH64_OPND_SVE_Za_16:
6395 	case AARCH64_OPND_SVE_Zd:
6396 	case AARCH64_OPND_SVE_Zm_5:
6397 	case AARCH64_OPND_SVE_Zm_16:
6398 	case AARCH64_OPND_SVE_Zn:
6399 	case AARCH64_OPND_SVE_Zt:
6400 	  reg_type = REG_TYPE_ZN;
6401 	  goto vector_reg;
6402 
6403 	case AARCH64_OPND_Va:
6404 	case AARCH64_OPND_Vd:
6405 	case AARCH64_OPND_Vn:
6406 	case AARCH64_OPND_Vm:
6407 	  reg_type = REG_TYPE_VN;
6408 	vector_reg:
6409 	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6410 	  if (val == PARSE_FAIL)
6411 	    {
6412 	      first_error (_(get_reg_expected_msg (reg_type)));
6413 	      goto failure;
6414 	    }
6415 	  if (vectype.defined & NTA_HASINDEX)
6416 	    goto failure;
6417 
6418 	  info->reg.regno = val;
6419 	  if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6420 	      && vectype.type == NT_invtype)
6421 	    /* Unqualified Pn and Zn registers are allowed in certain
6422 	       contexts.  Rely on F_STRICT qualifier checking to catch
6423 	       invalid uses.  */
6424 	    info->qualifier = AARCH64_OPND_QLF_NIL;
6425 	  else
6426 	    {
6427 	      info->qualifier = vectype_to_qualifier (&vectype);
6428 	      if (info->qualifier == AARCH64_OPND_QLF_NIL)
6429 		goto failure;
6430 	    }
6431 	  break;
6432 
6433 	case AARCH64_OPND_VdD1:
6434 	case AARCH64_OPND_VnD1:
6435 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6436 	  if (val == PARSE_FAIL)
6437 	    {
6438 	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6439 	      goto failure;
6440 	    }
6441 	  if (vectype.type != NT_d || vectype.index != 1)
6442 	    {
6443 	      set_fatal_syntax_error
6444 		(_("the top half of a 128-bit FP/SIMD register is expected"));
6445 	      goto failure;
6446 	    }
6447 	  info->reg.regno = val;
6448 	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar
6449 	     register here; it is correct for the purpose of encoding/decoding
6450 	     since only the register number is explicitly encoded in the
6451 	     related instructions, although this appears a bit hacky.  */
6452 	  info->qualifier = AARCH64_OPND_QLF_S_D;
6453 	  break;
6454 
6455 	case AARCH64_OPND_SVE_Zm3_INDEX:
6456 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
6457 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
6458 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
6459 	case AARCH64_OPND_SVE_Zm4_INDEX:
6460 	case AARCH64_OPND_SVE_Zn_INDEX:
6461 	  reg_type = REG_TYPE_ZN;
6462 	  goto vector_reg_index;
6463 
6464 	case AARCH64_OPND_Ed:
6465 	case AARCH64_OPND_En:
6466 	case AARCH64_OPND_Em:
6467 	case AARCH64_OPND_Em16:
6468 	case AARCH64_OPND_SM3_IMM2:
6469 	  reg_type = REG_TYPE_VN;
6470 	vector_reg_index:
6471 	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6472 	  if (val == PARSE_FAIL)
6473 	    {
6474 	      first_error (_(get_reg_expected_msg (reg_type)));
6475 	      goto failure;
6476 	    }
6477 	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6478 	    goto failure;
6479 
6480 	  info->reglane.regno = val;
6481 	  info->reglane.index = vectype.index;
6482 	  info->qualifier = vectype_to_qualifier (&vectype);
6483 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
6484 	    goto failure;
6485 	  break;
6486 
6487 	case AARCH64_OPND_SVE_ZnxN:
6488 	case AARCH64_OPND_SVE_ZtxN:
6489 	  reg_type = REG_TYPE_ZN;
6490 	  goto vector_reg_list;
6491 
6492 	case AARCH64_OPND_LVn:
6493 	case AARCH64_OPND_LVt:
6494 	case AARCH64_OPND_LVt_AL:
6495 	case AARCH64_OPND_LEt:
6496 	  reg_type = REG_TYPE_VN;
6497 	vector_reg_list:
6498 	  if (reg_type == REG_TYPE_ZN
6499 	      && get_opcode_dependent_value (opcode) == 1
6500 	      && *str != '{')
6501 	    {
6502 	      val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6503 	      if (val == PARSE_FAIL)
6504 		{
6505 		  first_error (_(get_reg_expected_msg (reg_type)));
6506 		  goto failure;
6507 		}
6508 	      info->reglist.first_regno = val;
6509 	      info->reglist.num_regs = 1;
6510 	    }
6511 	  else
6512 	    {
6513 	      val = parse_vector_reg_list (&str, reg_type, &vectype);
6514 	      if (val == PARSE_FAIL)
6515 		goto failure;
6516 
6517 	      if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6518 		{
6519 		  set_fatal_syntax_error (_("invalid register list"));
6520 		  goto failure;
6521 		}
6522 
6523 	      if (vectype.width != 0 && *str != ',')
6524 		{
6525 		  set_fatal_syntax_error
6526 		    (_("expected element type rather than vector type"));
6527 		  goto failure;
6528 		}
6529 
6530 	      info->reglist.first_regno = (val >> 2) & 0x1f;
6531 	      info->reglist.num_regs = (val & 0x3) + 1;
6532 	    }
6533 	  if (operands[i] == AARCH64_OPND_LEt)
6534 	    {
6535 	      if (!(vectype.defined & NTA_HASINDEX))
6536 		goto failure;
6537 	      info->reglist.has_index = 1;
6538 	      info->reglist.index = vectype.index;
6539 	    }
6540 	  else
6541 	    {
6542 	      if (vectype.defined & NTA_HASINDEX)
6543 		goto failure;
6544 	      if (!(vectype.defined & NTA_HASTYPE))
6545 		{
6546 		  if (reg_type == REG_TYPE_ZN)
6547 		    set_fatal_syntax_error (_("missing type suffix"));
6548 		  goto failure;
6549 		}
6550 	    }
6551 	  info->qualifier = vectype_to_qualifier (&vectype);
6552 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
6553 	    goto failure;
6554 	  break;
6555 
6556 	case AARCH64_OPND_CRn:
6557 	case AARCH64_OPND_CRm:
6558 	    {
6559 	      char prefix = *(str++);
6560 	      if (prefix != 'c' && prefix != 'C')
6561 		goto failure;
6562 
6563 	      po_imm_nc_or_fail ();
6564 	      if (val > 15)
6565 		{
6566 		  set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6567 		  goto failure;
6568 		}
6569 	      info->qualifier = AARCH64_OPND_QLF_CR;
6570 	      info->imm.value = val;
6571 	      break;
6572 	    }
6573 
6574 	case AARCH64_OPND_SHLL_IMM:
6575 	case AARCH64_OPND_IMM_VLSR:
6576 	  po_imm_or_fail (1, 64);
6577 	  info->imm.value = val;
6578 	  break;
6579 
6580 	case AARCH64_OPND_CCMP_IMM:
6581 	case AARCH64_OPND_SIMM5:
6582 	case AARCH64_OPND_FBITS:
6583 	case AARCH64_OPND_TME_UIMM16:
6584 	case AARCH64_OPND_UIMM4:
6585 	case AARCH64_OPND_UIMM4_ADDG:
6586 	case AARCH64_OPND_UIMM10:
6587 	case AARCH64_OPND_UIMM3_OP1:
6588 	case AARCH64_OPND_UIMM3_OP2:
6589 	case AARCH64_OPND_IMM_VLSL:
6590 	case AARCH64_OPND_IMM:
6591 	case AARCH64_OPND_IMM_2:
6592 	case AARCH64_OPND_WIDTH:
6593 	case AARCH64_OPND_SVE_INV_LIMM:
6594 	case AARCH64_OPND_SVE_LIMM:
6595 	case AARCH64_OPND_SVE_LIMM_MOV:
6596 	case AARCH64_OPND_SVE_SHLIMM_PRED:
6597 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6598 	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6599 	case AARCH64_OPND_SVE_SHRIMM_PRED:
6600 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6601 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6602 	case AARCH64_OPND_SVE_SIMM5:
6603 	case AARCH64_OPND_SVE_SIMM5B:
6604 	case AARCH64_OPND_SVE_SIMM6:
6605 	case AARCH64_OPND_SVE_SIMM8:
6606 	case AARCH64_OPND_SVE_UIMM3:
6607 	case AARCH64_OPND_SVE_UIMM7:
6608 	case AARCH64_OPND_SVE_UIMM8:
6609 	case AARCH64_OPND_SVE_UIMM8_53:
6610 	case AARCH64_OPND_IMM_ROT1:
6611 	case AARCH64_OPND_IMM_ROT2:
6612 	case AARCH64_OPND_IMM_ROT3:
6613 	case AARCH64_OPND_SVE_IMM_ROT1:
6614 	case AARCH64_OPND_SVE_IMM_ROT2:
6615 	case AARCH64_OPND_SVE_IMM_ROT3:
6616 	  po_imm_nc_or_fail ();
6617 	  info->imm.value = val;
6618 	  break;
6619 
6620 	case AARCH64_OPND_SVE_AIMM:
6621 	case AARCH64_OPND_SVE_ASIMM:
6622 	  po_imm_nc_or_fail ();
6623 	  info->imm.value = val;
6624 	  skip_whitespace (str);
6625 	  if (skip_past_comma (&str))
6626 	    po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6627 	  else
6628 	    inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6629 	  break;
6630 
6631 	case AARCH64_OPND_SVE_PATTERN:
6632 	  po_enum_or_fail (aarch64_sve_pattern_array);
6633 	  info->imm.value = val;
6634 	  break;
6635 
6636 	case AARCH64_OPND_SVE_PATTERN_SCALED:
6637 	  po_enum_or_fail (aarch64_sve_pattern_array);
6638 	  info->imm.value = val;
6639 	  if (skip_past_comma (&str)
6640 	      && !parse_shift (&str, info, SHIFTED_MUL))
6641 	    goto failure;
6642 	  if (!info->shifter.operator_present)
6643 	    {
6644 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6645 	      info->shifter.kind = AARCH64_MOD_MUL;
6646 	      info->shifter.amount = 1;
6647 	    }
6648 	  break;
6649 
6650 	case AARCH64_OPND_SVE_PRFOP:
6651 	  po_enum_or_fail (aarch64_sve_prfop_array);
6652 	  info->imm.value = val;
6653 	  break;
6654 
6655 	case AARCH64_OPND_UIMM7:
6656 	  po_imm_or_fail (0, 127);
6657 	  info->imm.value = val;
6658 	  break;
6659 
6660 	case AARCH64_OPND_IDX:
6661 	case AARCH64_OPND_MASK:
6662 	case AARCH64_OPND_BIT_NUM:
6663 	case AARCH64_OPND_IMMR:
6664 	case AARCH64_OPND_IMMS:
6665 	  po_imm_or_fail (0, 63);
6666 	  info->imm.value = val;
6667 	  break;
6668 
6669 	case AARCH64_OPND_IMM0:
6670 	  po_imm_nc_or_fail ();
6671 	  if (val != 0)
6672 	    {
6673 	      set_fatal_syntax_error (_("immediate zero expected"));
6674 	      goto failure;
6675 	    }
6676 	  info->imm.value = 0;
6677 	  break;
6678 
6679 	case AARCH64_OPND_FPIMM0:
6680 	  {
6681 	    int qfloat;
6682 	    bool res1 = false, res2 = false;
6683 	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6684 	       it is probably not worth the effort to support it.  */
6685 	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6686 						  imm_reg_type))
6687 		&& (error_p ()
6688 		    || !(res2 = parse_constant_immediate (&str, &val,
6689 							  imm_reg_type))))
6690 	      goto failure;
6691 	    if ((res1 && qfloat == 0) || (res2 && val == 0))
6692 	      {
6693 		info->imm.value = 0;
6694 		info->imm.is_fp = 1;
6695 		break;
6696 	      }
6697 	    set_fatal_syntax_error (_("immediate zero expected"));
6698 	    goto failure;
6699 	  }
6700 
6701 	case AARCH64_OPND_IMM_MOV:
6702 	  {
6703 	    char *saved = str;
6704 	    if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6705 		reg_name_p (str, REG_TYPE_VN))
6706 	      goto failure;
6707 	    str = saved;
6708 	    po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6709 						     GE_OPT_PREFIX, REJECT_ABSENT,
6710 						     NORMAL_RESOLUTION));
6711 	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6712 	       later.  fix_mov_imm_insn will try to determine a machine
6713 	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
6714 	       message if the immediate cannot be moved by a single
6715 	       instruction.  */
6716 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6717 	    inst.base.operands[i].skip = 1;
6718 	  }
6719 	  break;
6720 
6721 	case AARCH64_OPND_SIMD_IMM:
6722 	case AARCH64_OPND_SIMD_IMM_SFT:
6723 	  if (! parse_big_immediate (&str, &val, imm_reg_type))
6724 	    goto failure;
6725 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6726 					      /* addr_off_p */ 0,
6727 					      /* need_libopcodes_p */ 1,
6728 					      /* skip_p */ 1);
6729 	  /* Parse shift.
6730 	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6731 	     shift, we don't check it here; we leave the checking to
6732 	     the libopcodes (operand_general_constraint_met_p).  By
6733 	     doing this, we achieve better diagnostics.  */
6734 	  if (skip_past_comma (&str)
6735 	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6736 	    goto failure;
6737 	  if (!info->shifter.operator_present
6738 	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6739 	    {
6740 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6741 		 kind to be explicit.  */
6742 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6743 	      info->shifter.kind = AARCH64_MOD_LSL;
6744 	    }
6745 	  break;
6746 
6747 	case AARCH64_OPND_FPIMM:
6748 	case AARCH64_OPND_SIMD_FPIMM:
6749 	case AARCH64_OPND_SVE_FPIMM8:
6750 	  {
6751 	    int qfloat;
6752 	    bool dp_p;
6753 
6754 	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
6755 	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6756 		|| !aarch64_imm_float_p (qfloat))
6757 	      {
6758 		if (!error_p ())
6759 		  set_fatal_syntax_error (_("invalid floating-point"
6760 					    " constant"));
6761 		goto failure;
6762 	      }
6763 	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6764 	    inst.base.operands[i].imm.is_fp = 1;
6765 	  }
6766 	  break;
6767 
6768 	case AARCH64_OPND_SVE_I1_HALF_ONE:
6769 	case AARCH64_OPND_SVE_I1_HALF_TWO:
6770 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
6771 	  {
6772 	    int qfloat;
6773 	    bool dp_p;
6774 
6775 	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
6776 	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6777 	      {
6778 		if (!error_p ())
6779 		  set_fatal_syntax_error (_("invalid floating-point"
6780 					    " constant"));
6781 		goto failure;
6782 	      }
6783 	    inst.base.operands[i].imm.value = qfloat;
6784 	    inst.base.operands[i].imm.is_fp = 1;
6785 	  }
6786 	  break;
6787 
6788 	case AARCH64_OPND_LIMM:
6789 	  po_misc_or_fail (parse_shifter_operand (&str, info,
6790 						  SHIFTED_LOGIC_IMM));
6791 	  if (info->shifter.operator_present)
6792 	    {
6793 	      set_fatal_syntax_error
6794 		(_("shift not allowed for bitmask immediate"));
6795 	      goto failure;
6796 	    }
6797 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6798 					      /* addr_off_p */ 0,
6799 					      /* need_libopcodes_p */ 1,
6800 					      /* skip_p */ 1);
6801 	  break;
6802 
6803 	case AARCH64_OPND_AIMM:
6804 	  if (opcode->op == OP_ADD)
6805 	    /* ADD may have relocation types.  */
6806 	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6807 							  SHIFTED_ARITH_IMM));
6808 	  else
6809 	    po_misc_or_fail (parse_shifter_operand (&str, info,
6810 						    SHIFTED_ARITH_IMM));
6811 	  switch (inst.reloc.type)
6812 	    {
6813 	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6814 	      info->shifter.amount = 12;
6815 	      break;
6816 	    case BFD_RELOC_UNUSED:
6817 	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6818 	      if (info->shifter.kind != AARCH64_MOD_NONE)
6819 		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6820 	      inst.reloc.pc_rel = 0;
6821 	      break;
6822 	    default:
6823 	      break;
6824 	    }
6825 	  info->imm.value = 0;
6826 	  if (!info->shifter.operator_present)
6827 	    {
6828 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6829 		 kind to be explicit.  */
6830 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6831 	      info->shifter.kind = AARCH64_MOD_LSL;
6832 	    }
6833 	  break;
6834 
6835 	case AARCH64_OPND_HALF:
6836 	    {
6837 	      /* #<imm16> or relocation.  */
6838 	      int internal_fixup_p;
6839 	      po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6840 	      if (internal_fixup_p)
6841 		aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6842 	      skip_whitespace (str);
6843 	      if (skip_past_comma (&str))
6844 		{
6845 		  /* {, LSL #<shift>}  */
6846 		  if (! aarch64_gas_internal_fixup_p ())
6847 		    {
6848 		      set_fatal_syntax_error (_("can't mix relocation modifier "
6849 						"with explicit shift"));
6850 		      goto failure;
6851 		    }
6852 		  po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6853 		}
6854 	      else
6855 		inst.base.operands[i].shifter.amount = 0;
6856 	      inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6857 	      inst.base.operands[i].imm.value = 0;
6858 	      if (! process_movw_reloc_info ())
6859 		goto failure;
6860 	    }
6861 	  break;
6862 
6863 	case AARCH64_OPND_EXCEPTION:
6864 	case AARCH64_OPND_UNDEFINED:
6865 	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6866 						       imm_reg_type));
6867 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6868 					      /* addr_off_p */ 0,
6869 					      /* need_libopcodes_p */ 0,
6870 					      /* skip_p */ 1);
6871 	  break;
6872 
6873 	case AARCH64_OPND_NZCV:
6874 	  {
6875 	    const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6876 	    if (nzcv != NULL)
6877 	      {
6878 		str += 4;
6879 		info->imm.value = nzcv->value;
6880 		break;
6881 	      }
6882 	    po_imm_or_fail (0, 15);
6883 	    info->imm.value = val;
6884 	  }
6885 	  break;
6886 
6887 	case AARCH64_OPND_COND:
6888 	case AARCH64_OPND_COND1:
6889 	  {
6890 	    char *start = str;
6891 	    do
6892 	      str++;
6893 	    while (ISALPHA (*str));
6894 	    info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6895 	    if (info->cond == NULL)
6896 	      {
6897 		set_syntax_error (_("invalid condition"));
6898 		goto failure;
6899 	      }
6900 	    else if (operands[i] == AARCH64_OPND_COND1
6901 		     && (info->cond->value & 0xe) == 0xe)
6902 	      {
6903 		/* Do not allow AL or NV.  */
6904 		set_default_error ();
6905 		goto failure;
6906 	      }
6907 	  }
6908 	  break;
6909 
6910 	case AARCH64_OPND_ADDR_ADRP:
6911 	  po_misc_or_fail (parse_adrp (&str));
6912 	  /* Clear the value as operand needs to be relocated.  */
6913 	  info->imm.value = 0;
6914 	  break;
6915 
6916 	case AARCH64_OPND_ADDR_PCREL14:
6917 	case AARCH64_OPND_ADDR_PCREL19:
6918 	case AARCH64_OPND_ADDR_PCREL21:
6919 	case AARCH64_OPND_ADDR_PCREL26:
6920 	  po_misc_or_fail (parse_address (&str, info));
6921 	  if (!info->addr.pcrel)
6922 	    {
6923 	      set_syntax_error (_("invalid pc-relative address"));
6924 	      goto failure;
6925 	    }
6926 	  if (inst.gen_lit_pool
6927 	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6928 	    {
6929 	      /* Only permit "=value" in the literal load instructions.
6930 		 The literal will be generated by programmer_friendly_fixup.  */
6931 	      set_syntax_error (_("invalid use of \"=immediate\""));
6932 	      goto failure;
6933 	    }
6934 	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6935 	    {
6936 	      set_syntax_error (_("unrecognized relocation suffix"));
6937 	      goto failure;
6938 	    }
6939 	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6940 	    {
6941 	      info->imm.value = inst.reloc.exp.X_add_number;
6942 	      inst.reloc.type = BFD_RELOC_UNUSED;
6943 	    }
6944 	  else
6945 	    {
6946 	      info->imm.value = 0;
6947 	      if (inst.reloc.type == BFD_RELOC_UNUSED)
6948 		switch (opcode->iclass)
6949 		  {
6950 		  case compbranch:
6951 		  case condbranch:
6952 		    /* e.g. CBZ or B.COND  */
6953 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6954 		    inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6955 		    break;
6956 		  case testbranch:
6957 		    /* e.g. TBZ  */
6958 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6959 		    inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6960 		    break;
6961 		  case branch_imm:
6962 		    /* e.g. B or BL  */
6963 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6964 		    inst.reloc.type =
6965 		      (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6966 			 : BFD_RELOC_AARCH64_JUMP26;
6967 		    break;
6968 		  case loadlit:
6969 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6970 		    inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6971 		    break;
6972 		  case pcreladdr:
6973 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6974 		    inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6975 		    break;
6976 		  default:
6977 		    gas_assert (0);
6978 		    abort ();
6979 		  }
6980 	      inst.reloc.pc_rel = 1;
6981 	    }
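	  /* Illustrative examples (added note, not part of the original
	     source): with a symbolic target, "b lbl" selects
	     BFD_RELOC_AARCH64_JUMP26, "bl lbl" selects CALL26,
	     "cbz x0, lbl" selects BRANCH19 and "adr x0, lbl" selects
	     ADR_LO21_PCREL, per the iclass switch above.  */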
6982 	  break;
6983 
6984 	case AARCH64_OPND_ADDR_SIMPLE:
6985 	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6986 	  {
6987 	    /* [<Xn|SP>{, #<simm>}]  */
6988 	    char *start = str;
6989 	    /* First use the normal address-parsing routines, to get
6990 	       the usual syntax errors.  */
6991 	    po_misc_or_fail (parse_address (&str, info));
6992 	    if (info->addr.pcrel || info->addr.offset.is_reg
6993 		|| !info->addr.preind || info->addr.postind
6994 		|| info->addr.writeback)
6995 	      {
6996 		set_syntax_error (_("invalid addressing mode"));
6997 		goto failure;
6998 	      }
6999 
7000 	    /* Then retry, matching the specific syntax of these addresses.  */
7001 	    str = start;
7002 	    po_char_or_fail ('[');
7003 	    po_reg_or_fail (REG_TYPE_R64_SP);
7004 	    /* Accept optional ", #0".  */
7005 	    if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7006 		&& skip_past_char (&str, ','))
7007 	      {
7008 		skip_past_char (&str, '#');
7009 		if (! skip_past_char (&str, '0'))
7010 		  {
7011 		    set_fatal_syntax_error
7012 		      (_("the optional immediate offset can only be 0"));
7013 		    goto failure;
7014 		  }
7015 	      }
7016 	    po_char_or_fail (']');
7017 	    break;
7018 	  }
7019 
7020 	case AARCH64_OPND_ADDR_REGOFF:
7021 	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
7022 	  po_misc_or_fail (parse_address (&str, info));
7023 	regoff_addr:
7024 	  if (info->addr.pcrel || !info->addr.offset.is_reg
7025 	      || !info->addr.preind || info->addr.postind
7026 	      || info->addr.writeback)
7027 	    {
7028 	      set_syntax_error (_("invalid addressing mode"));
7029 	      goto failure;
7030 	    }
7031 	  if (!info->shifter.operator_present)
7032 	    {
7033 	      /* Default to LSL if not present.  Libopcodes prefers shifter
7034 		 kind to be explicit.  */
7035 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7036 	      info->shifter.kind = AARCH64_MOD_LSL;
7037 	    }
7038 	  /* Qualifier to be deduced by libopcodes.  */
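	  /* Illustrative examples (added note, not part of the original
	     source): "ldr x0, [x1, x2]" reaches here with the shifter
	     defaulted to LSL, while "ldr x0, [x1, w2, sxtw #3]" arrives with
	     an explicit extend already recorded by parse_address.  */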
7039 	  break;
7040 
7041 	case AARCH64_OPND_ADDR_SIMM7:
7042 	  po_misc_or_fail (parse_address (&str, info));
7043 	  if (info->addr.pcrel || info->addr.offset.is_reg
7044 	      || (!info->addr.preind && !info->addr.postind))
7045 	    {
7046 	      set_syntax_error (_("invalid addressing mode"));
7047 	      goto failure;
7048 	    }
7049 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
7050 	    {
7051 	      set_syntax_error (_("relocation not allowed"));
7052 	      goto failure;
7053 	    }
7054 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7055 					      /* addr_off_p */ 1,
7056 					      /* need_libopcodes_p */ 1,
7057 					      /* skip_p */ 0);
7058 	  break;
7059 
7060 	case AARCH64_OPND_ADDR_SIMM9:
7061 	case AARCH64_OPND_ADDR_SIMM9_2:
7062 	case AARCH64_OPND_ADDR_SIMM11:
7063 	case AARCH64_OPND_ADDR_SIMM13:
7064 	  po_misc_or_fail (parse_address (&str, info));
7065 	  if (info->addr.pcrel || info->addr.offset.is_reg
7066 	      || (!info->addr.preind && !info->addr.postind)
7067 	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7068 		  && info->addr.writeback))
7069 	    {
7070 	      set_syntax_error (_("invalid addressing mode"));
7071 	      goto failure;
7072 	    }
7073 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
7074 	    {
7075 	      set_syntax_error (_("relocation not allowed"));
7076 	      goto failure;
7077 	    }
7078 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7079 					      /* addr_off_p */ 1,
7080 					      /* need_libopcodes_p */ 1,
7081 					      /* skip_p */ 0);
7082 	  break;
7083 
7084 	case AARCH64_OPND_ADDR_SIMM10:
7085 	case AARCH64_OPND_ADDR_OFFSET:
7086 	  po_misc_or_fail (parse_address (&str, info));
7087 	  if (info->addr.pcrel || info->addr.offset.is_reg
7088 	      || !info->addr.preind || info->addr.postind)
7089 	    {
7090 	      set_syntax_error (_("invalid addressing mode"));
7091 	      goto failure;
7092 	    }
7093 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
7094 	    {
7095 	      set_syntax_error (_("relocation not allowed"));
7096 	      goto failure;
7097 	    }
7098 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7099 					      /* addr_off_p */ 1,
7100 					      /* need_libopcodes_p */ 1,
7101 					      /* skip_p */ 0);
7102 	  break;
7103 
7104 	case AARCH64_OPND_ADDR_UIMM12:
7105 	  po_misc_or_fail (parse_address (&str, info));
7106 	  if (info->addr.pcrel || info->addr.offset.is_reg
7107 	      || !info->addr.preind || info->addr.writeback)
7108 	    {
7109 	      set_syntax_error (_("invalid addressing mode"));
7110 	      goto failure;
7111 	    }
7112 	  if (inst.reloc.type == BFD_RELOC_UNUSED)
7113 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7114 	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7115 		   || (inst.reloc.type
7116 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7117 		   || (inst.reloc.type
7118 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7119 		   || (inst.reloc.type
7120 		       == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7121 		   || (inst.reloc.type
7122 		       == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7123 	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7124 	  /* Leave qualifier to be determined by libopcodes.  */
7125 	  break;
7126 
7127 	case AARCH64_OPND_SIMD_ADDR_POST:
7128 	  /* [<Xn|SP>], <Xm|#<amount>>  */
7129 	  po_misc_or_fail (parse_address (&str, info));
7130 	  if (!info->addr.postind || !info->addr.writeback)
7131 	    {
7132 	      set_syntax_error (_("invalid addressing mode"));
7133 	      goto failure;
7134 	    }
7135 	  if (!info->addr.offset.is_reg)
7136 	    {
7137 	      if (inst.reloc.exp.X_op == O_constant)
7138 		info->addr.offset.imm = inst.reloc.exp.X_add_number;
7139 	      else
7140 		{
7141 		  set_fatal_syntax_error
7142 		    (_("writeback value must be an immediate constant"));
7143 		  goto failure;
7144 		}
7145 	    }
7146 	  /* No qualifier.  */
7147 	  break;
7148 
7149 	case AARCH64_OPND_SME_SM_ZA:
7150 	  /* { SM | ZA }  */
7151 	  if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7152 	    {
7153 	      set_syntax_error (_("unknown or missing PSTATE field name"));
7154 	      goto failure;
7155 	    }
7156 	  info->reg.regno = val;
7157 	  break;
7158 
7159 	case AARCH64_OPND_SME_PnT_Wm_imm:
7160 	  /* <Pn>.<T>[<Wm>, #<imm>]  */
7161 	  {
7162 	    int index_base_reg;
7163 	    int imm;
7164 	    val = parse_sme_pred_reg_with_index (&str,
7165 	                                         &index_base_reg,
7166 	                                         &imm,
7167 	                                         &qualifier);
7168 	    if (val == PARSE_FAIL)
7169 	        goto failure;
7170 
7171 	    info->za_tile_vector.regno = val;
7172 	    info->za_tile_vector.index.regno = index_base_reg;
7173 	    info->za_tile_vector.index.imm = imm;
7174 	    info->qualifier = qualifier;
7175 	    break;
7176 	  }
7177 
7178 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7179 	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7180 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7181 	case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7182 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7183 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7184 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7185 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7186 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7187 	case AARCH64_OPND_SVE_ADDR_RI_U6:
7188 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7189 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7190 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7191 	  /* [X<n>{, #imm, MUL VL}]
7192 	     [X<n>{, #imm}]
7193 	     but recognizing SVE registers.  */
7194 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7195 					      &offset_qualifier));
7196 	  if (base_qualifier != AARCH64_OPND_QLF_X)
7197 	    {
7198 	      set_syntax_error (_("invalid addressing mode"));
7199 	      goto failure;
7200 	    }
7201 	sve_regimm:
7202 	  if (info->addr.pcrel || info->addr.offset.is_reg
7203 	      || !info->addr.preind || info->addr.writeback)
7204 	    {
7205 	      set_syntax_error (_("invalid addressing mode"));
7206 	      goto failure;
7207 	    }
7208 	  if (inst.reloc.type != BFD_RELOC_UNUSED
7209 	      || inst.reloc.exp.X_op != O_constant)
7210 	    {
7211 	      /* Make sure this has priority over
7212 		 "invalid addressing mode".  */
7213 	      set_fatal_syntax_error (_("constant offset required"));
7214 	      goto failure;
7215 	    }
7216 	  info->addr.offset.imm = inst.reloc.exp.X_add_number;
7217 	  break;
7218 
7219 	case AARCH64_OPND_SVE_ADDR_R:
7220 	  /* [<Xn|SP>{, <R><m>}]
7221 	     but recognizing SVE registers.  */
7222 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7223 					      &offset_qualifier));
7224 	  if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7225 	    {
7226 	      offset_qualifier = AARCH64_OPND_QLF_X;
7227 	      info->addr.offset.is_reg = 1;
7228 	      info->addr.offset.regno = 31;
7229 	    }
7230 	  else if (base_qualifier != AARCH64_OPND_QLF_X
7231 	      || offset_qualifier != AARCH64_OPND_QLF_X)
7232 	    {
7233 	      set_syntax_error (_("invalid addressing mode"));
7234 	      goto failure;
7235 	    }
7236 	  goto regoff_addr;
7237 
7238 	case AARCH64_OPND_SVE_ADDR_RR:
7239 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7240 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7241 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7242 	case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7243 	case AARCH64_OPND_SVE_ADDR_RX:
7244 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7245 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7246 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7247 	  /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7248 	     but recognizing SVE registers.  */
7249 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7250 					      &offset_qualifier));
7251 	  if (base_qualifier != AARCH64_OPND_QLF_X
7252 	      || offset_qualifier != AARCH64_OPND_QLF_X)
7253 	    {
7254 	      set_syntax_error (_("invalid addressing mode"));
7255 	      goto failure;
7256 	    }
7257 	  goto regoff_addr;
7258 
7259 	case AARCH64_OPND_SVE_ADDR_RZ:
7260 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7261 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7262 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7263 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7264 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7265 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7266 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7267 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7268 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7269 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7270 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7271 	  /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7272 	     [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}]  */
7273 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7274 					      &offset_qualifier));
7275 	  if (base_qualifier != AARCH64_OPND_QLF_X
7276 	      || (offset_qualifier != AARCH64_OPND_QLF_S_S
7277 		  && offset_qualifier != AARCH64_OPND_QLF_S_D))
7278 	    {
7279 	      set_syntax_error (_("invalid addressing mode"));
7280 	      goto failure;
7281 	    }
7282 	  info->qualifier = offset_qualifier;
7283 	  goto regoff_addr;
7284 
7285 	case AARCH64_OPND_SVE_ADDR_ZX:
7286 	  /* [Zn.<T>{, <Xm>}].  */
7287 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7288 					      &offset_qualifier));
7289 	  /* Things to check:
7290 	      base_qualifier must be either S_S or S_D;
7291 	      offset_qualifier must be X.  */
7293 	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
7294 	       && base_qualifier != AARCH64_OPND_QLF_S_D)
7295 	      || offset_qualifier != AARCH64_OPND_QLF_X)
7296 	    {
7297 	      set_syntax_error (_("invalid addressing mode"));
7298 	      goto failure;
7299 	    }
7300 	  info->qualifier = base_qualifier;
7301 	  if (!info->addr.offset.is_reg || info->addr.pcrel
7302 	      || !info->addr.preind || info->addr.writeback
7303 	      || info->shifter.operator_present != 0)
7304 	    {
7305 	      set_syntax_error (_("invalid addressing mode"));
7306 	      goto failure;
7307 	    }
7308 	  info->shifter.kind = AARCH64_MOD_LSL;
7309 	  break;
7310 
7311 
7312 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
7313 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7314 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7315 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7316 	  /* [Z<n>.<T>{, #imm}]  */
7317 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7318 					      &offset_qualifier));
7319 	  if (base_qualifier != AARCH64_OPND_QLF_S_S
7320 	      && base_qualifier != AARCH64_OPND_QLF_S_D)
7321 	    {
7322 	      set_syntax_error (_("invalid addressing mode"));
7323 	      goto failure;
7324 	    }
7325 	  info->qualifier = base_qualifier;
7326 	  goto sve_regimm;
7327 
7328 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7329 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7330 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7331 	  /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7332 	     [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7333 
7334 	     We don't reject:
7335 
7336 	     [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7337 
7338 	     here since we get better error messages by leaving it to
7339 	     the qualifier checking routines.  */
7340 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7341 					      &offset_qualifier));
7342 	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
7343 	       && base_qualifier != AARCH64_OPND_QLF_S_D)
7344 	      || offset_qualifier != base_qualifier)
7345 	    {
7346 	      set_syntax_error (_("invalid addressing mode"));
7347 	      goto failure;
7348 	    }
7349 	  info->qualifier = base_qualifier;
7350 	  goto regoff_addr;
7351 
7352 	case AARCH64_OPND_SYSREG:
7353 	  {
7354 	    uint32_t sysreg_flags;
7355 	    if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7356 				      &sysreg_flags)) == PARSE_FAIL)
7357 	      {
7358 		set_syntax_error (_("unknown or missing system register name"));
7359 		goto failure;
7360 	      }
7361 	    inst.base.operands[i].sysreg.value = val;
7362 	    inst.base.operands[i].sysreg.flags = sysreg_flags;
7363 	    break;
7364 	  }
7365 
7366 	case AARCH64_OPND_PSTATEFIELD:
7367 	  {
7368 	    uint32_t sysreg_flags;
7369 	    if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7370 				      &sysreg_flags)) == PARSE_FAIL)
7371 	      {
7372 	        set_syntax_error (_("unknown or missing PSTATE field name"));
7373 	        goto failure;
7374 	      }
7375 	    inst.base.operands[i].pstatefield = val;
7376 	    inst.base.operands[i].sysreg.flags = sysreg_flags;
7377 	    break;
7378 	  }
7379 
7380 	case AARCH64_OPND_SYSREG_IC:
7381 	  inst.base.operands[i].sysins_op =
7382 	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7383 	  goto sys_reg_ins;
7384 
7385 	case AARCH64_OPND_SYSREG_DC:
7386 	  inst.base.operands[i].sysins_op =
7387 	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7388 	  goto sys_reg_ins;
7389 
7390 	case AARCH64_OPND_SYSREG_AT:
7391 	  inst.base.operands[i].sysins_op =
7392 	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7393 	  goto sys_reg_ins;
7394 
7395 	case AARCH64_OPND_SYSREG_SR:
7396 	  inst.base.operands[i].sysins_op =
7397 	    parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7398 	  goto sys_reg_ins;
7399 
7400 	case AARCH64_OPND_SYSREG_TLBI:
7401 	  inst.base.operands[i].sysins_op =
7402 	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7403 	sys_reg_ins:
7404 	  if (inst.base.operands[i].sysins_op == NULL)
7405 	    {
7406 	      set_fatal_syntax_error (_("unknown or missing operation name"));
7407 	      goto failure;
7408 	    }
7409 	  break;
7410 
7411 	case AARCH64_OPND_BARRIER:
7412 	case AARCH64_OPND_BARRIER_ISB:
7413 	  val = parse_barrier (&str);
7414 	  if (val != PARSE_FAIL
7415 	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7416 	    {
7417 	      /* ISB only accepts the option name 'sy'.  */
7418 	      set_syntax_error
7419 		(_("the specified option is not accepted in ISB"));
7420 	      /* Turn off backtrack as this optional operand is present.  */
7421 	      backtrack_pos = 0;
7422 	      goto failure;
7423 	    }
7424 	  if (val != PARSE_FAIL
7425 	      && operands[i] == AARCH64_OPND_BARRIER)
7426 	    {
7427 	      /* Regular barriers accept options CRm (C0-C15).
7428 	         DSB nXS barrier variant accepts values > 15.  */
7429 	      if (val < 0 || val > 15)
7430 	      {
7431 	        set_syntax_error (_("the specified option is not accepted in DSB"));
7432 	        goto failure;
7433 	      }
7434 	    }
7435 	  /* This is an extension to accept a 0..15 immediate.  */
7436 	  if (val == PARSE_FAIL)
7437 	    po_imm_or_fail (0, 15);
7438 	  info->barrier = aarch64_barrier_options + val;
7439 	  break;
7440 
7441 	case AARCH64_OPND_BARRIER_DSB_NXS:
7442 	  val = parse_barrier (&str);
7443 	  if (val != PARSE_FAIL)
7444 	    {
7445 	      /* The DSB nXS barrier variant accepts only <option>nXS qualifiers.  */
7446 	      if (!(val == 16 || val == 20 || val == 24 || val == 28))
7447 	        {
7448 	          set_syntax_error (_("the specified option is not accepted in DSB"));
7449 	          /* Turn off backtrack as this optional operand is present.  */
7450 	          backtrack_pos = 0;
7451 	          goto failure;
7452 	        }
7453 	    }
7454 	  else
7455 	    {
7456 	      /* The DSB nXS barrier variant accepts a 5-bit unsigned immediate,
7457 	         with possible values 16, 20, 24 or 28, encoded in val<3:2>.  */
7458 	      if (! parse_constant_immediate (&str, &val, imm_reg_type))
7459 	        goto failure;
7460 	      if (!(val == 16 || val == 20 || val == 24 || val == 28))
7461 	        {
7462 	          set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7463 	          goto failure;
7464 	        }
7465 	    }
7466 	  /* Option index is encoded as 2-bit value in val<3:2>.  */
7467 	  val = (val >> 2) - 4;
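	  /* Worked example (added note, not part of the original source):
	     an operand value of 16 gives (16 >> 2) - 4 == 0 and 28 gives
	     (28 >> 2) - 4 == 3, so the four accepted values map to table
	     indices 0..3.  */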
7468 	  info->barrier = aarch64_barrier_dsb_nxs_options + val;
7469 	  break;
7470 
7471 	case AARCH64_OPND_PRFOP:
7472 	  val = parse_pldop (&str);
7473 	  /* This is an extension to accept a 0..31 immediate.  */
7474 	  if (val == PARSE_FAIL)
7475 	    po_imm_or_fail (0, 31);
7476 	  inst.base.operands[i].prfop = aarch64_prfops + val;
7477 	  break;
7478 
7479 	case AARCH64_OPND_BARRIER_PSB:
7480 	  val = parse_barrier_psb (&str, &(info->hint_option));
7481 	  if (val == PARSE_FAIL)
7482 	    goto failure;
7483 	  break;
7484 
7485 	case AARCH64_OPND_BTI_TARGET:
7486 	  val = parse_bti_operand (&str, &(info->hint_option));
7487 	  if (val == PARSE_FAIL)
7488 	    goto failure;
7489 	  break;
7490 
7491 	case AARCH64_OPND_SME_ZAda_2b:
7492 	case AARCH64_OPND_SME_ZAda_3b:
7493 	  val = parse_sme_zada_operand (&str, &qualifier);
7494 	  if (val == PARSE_FAIL)
7495 	    goto failure;
7496 	  info->reg.regno = val;
7497 	  info->qualifier = qualifier;
7498 	  break;
7499 
7500 	case AARCH64_OPND_SME_ZA_HV_idx_src:
7501 	case AARCH64_OPND_SME_ZA_HV_idx_dest:
7502 	case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7503 	  {
7504 	    enum sme_hv_slice slice_indicator;
7505 	    int vector_select_register;
7506 	    int imm;
7507 
7508 	    if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7509 	      val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7510 	                                                       &slice_indicator,
7511 	                                                       &vector_select_register,
7512 	                                                       &imm,
7513 	                                                       &qualifier);
7514 	    else
7515 	      val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7516 	                                           &vector_select_register,
7517 	                                           &imm,
7518 	                                           &qualifier);
7519 	    if (val == PARSE_FAIL)
7520 	      goto failure;
7521 	    info->za_tile_vector.regno = val;
7522 	    info->za_tile_vector.index.regno = vector_select_register;
7523 	    info->za_tile_vector.index.imm = imm;
7524 	    info->za_tile_vector.v = slice_indicator;
7525 	    info->qualifier = qualifier;
7526 	    break;
7527 	  }
7528 
7529 	  case AARCH64_OPND_SME_list_of_64bit_tiles:
7530 	    val = parse_sme_list_of_64bit_tiles (&str);
7531 	    if (val == PARSE_FAIL)
7532 	      goto failure;
7533 	    info->imm.value = val;
7534 	    break;
7535 
7536 	  case AARCH64_OPND_SME_ZA_array:
7537 	    {
7538 	      int imm;
7539 	      val = parse_sme_za_array (&str, &imm);
7540 	      if (val == PARSE_FAIL)
7541 	        goto failure;
7542 	      info->za_tile_vector.index.regno = val;
7543 	      info->za_tile_vector.index.imm = imm;
7544 	      break;
7545 	    }
7546 
7547 	case AARCH64_OPND_MOPS_ADDR_Rd:
7548 	case AARCH64_OPND_MOPS_ADDR_Rs:
7549 	  po_char_or_fail ('[');
7550 	  if (!parse_x0_to_x30 (&str, info))
7551 	    goto failure;
7552 	  po_char_or_fail (']');
7553 	  po_char_or_fail ('!');
7554 	  break;
7555 
7556 	case AARCH64_OPND_MOPS_WB_Rn:
7557 	  if (!parse_x0_to_x30 (&str, info))
7558 	    goto failure;
7559 	  po_char_or_fail ('!');
7560 	  break;
7561 
7562 	default:
7563 	  as_fatal (_("unhandled operand code %d"), operands[i]);
7564 	}
7565 
7566       /* If we get here, this operand was successfully parsed.  */
7567       inst.base.operands[i].present = 1;
7568       continue;
7569 
7570     failure:
7571       /* The parse routine should already have set the error, but in case
7572 	 not, set a default one here.  */
7573       if (! error_p ())
7574 	set_default_error ();
7575 
7576       if (! backtrack_pos)
7577 	goto parse_operands_return;
7578 
7579       {
7580 	/* We reach here because this operand is marked as optional, and
7581 	   either no operand was supplied or the operand was supplied but it
7582 	   was syntactically incorrect.  In the latter case we report an
7583 	   error.  In the former case we perform a few more checks before
7584 	   dropping through to the code to insert the default operand.  */
7585 
7586 	char *tmp = backtrack_pos;
7587 	char endchar = END_OF_INSN;
7588 
7589 	if (i != (aarch64_num_of_operands (opcode) - 1))
7590 	  endchar = ',';
7591 	skip_past_char (&tmp, ',');
7592 
7593 	if (*tmp != endchar)
7594 	  /* The user has supplied an operand in the wrong format.  */
7595 	  goto parse_operands_return;
7596 
7597 	/* Make sure there is not a comma before the optional operand.
7598 	   For example the fifth operand of 'sys' is optional:
7599 
7600 	     sys #0,c0,c0,#0,  <--- wrong
7601 	     sys #0,c0,c0,#0   <--- correct.  */
7602 	if (comma_skipped_p && i && endchar == END_OF_INSN)
7603 	  {
7604 	    set_fatal_syntax_error
7605 	      (_("unexpected comma before the omitted optional operand"));
7606 	    goto parse_operands_return;
7607 	  }
7608       }
7609 
7610       /* Reaching here means we are dealing with an optional operand that is
7611 	 omitted from the assembly line.  */
7612       gas_assert (optional_operand_p (opcode, i));
7613       info->present = 0;
7614       process_omitted_operand (operands[i], opcode, i, info);
7615 
7616       /* Try again, skipping the optional operand at backtrack_pos.  */
7617       str = backtrack_pos;
7618       backtrack_pos = 0;
7619 
7620       /* Clear any error record after the omitted optional operand has been
7621 	 successfully handled.  */
7622       clear_error ();
7623     }
7624 
7625   /* Check if we have parsed all the operands.  */
7626   if (*str != '\0' && ! error_p ())
7627     {
7628       /* Set I to the index of the last present operand; this is
7629 	 for the purpose of diagnostics.  */
7630       for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7631 	;
7632       set_fatal_syntax_error
7633 	(_("unexpected characters following instruction"));
7634     }
7635 
7636  parse_operands_return:
7637 
7638   if (error_p ())
7639     {
7640       DEBUG_TRACE ("parsing FAIL: %s - %s",
7641 		   operand_mismatch_kind_names[get_error_kind ()],
7642 		   get_error_message ());
7643       /* Record the operand error properly; this is useful when there
7644 	 are multiple instruction templates for a mnemonic name, so that
7645 	 later on, we can select the error that most closely describes
7646 	 the problem.  */
7647       record_operand_error (opcode, i, get_error_kind (),
7648 			    get_error_message ());
7649       return false;
7650     }
7651   else
7652     {
7653       DEBUG_TRACE ("parsing SUCCESS");
7654       return true;
7655     }
7656 }
7657 
7658 /* Perform some fix-ups to provide programmer-friendly features while
7659    keeping libopcodes happy, i.e. libopcodes only accepts
7660    the preferred architectural syntax.
7661    Return FALSE if there is any failure; otherwise return TRUE.  */
7662 
7663 static bool
7664 programmer_friendly_fixup (aarch64_instruction *instr)
7665 {
7666   aarch64_inst *base = &instr->base;
7667   const aarch64_opcode *opcode = base->opcode;
7668   enum aarch64_op op = opcode->op;
7669   aarch64_opnd_info *operands = base->operands;
7670 
7671   DEBUG_TRACE ("enter");
7672 
7673   switch (opcode->iclass)
7674     {
7675     case testbranch:
7676       /* TBNZ Xn|Wn, #uimm6, label
7677 	 Test and Branch Not Zero: conditionally jumps to label if bit number
7678 	 uimm6 in register Xn is not zero.  The bit number implies the width of
7679 	 the register, which may be written and should be disassembled as Wn if
7680 	 uimm is less than 32.  */
7681       if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7682 	{
7683 	  if (operands[1].imm.value >= 32)
7684 	    {
7685 	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7686 						 0, 31);
7687 	      return false;
7688 	    }
7689 	  operands[0].qualifier = AARCH64_OPND_QLF_X;
7690 	}
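      /* Illustrative example (added note, not part of the original source):
	 "tbz w0, #3, lbl" is accepted and re-qualified as the X form that
	 libopcodes expects, while "tbz w0, #38, lbl" is rejected because bit
	 38 does not exist in a W register.  */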
7691       break;
7692     case loadlit:
7693       /* LDR Wt, label | =value
7694 	 As a convenience assemblers will typically permit the notation
7695 	 "=value" in conjunction with the pc-relative literal load instructions
7696 	 to automatically place an immediate value or symbolic address in a
7697 	 nearby literal pool and generate a hidden label which references it.
7698 	 ISREG has been set to 0 in the case of =value.  */
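      /* Illustrative example (added note, not part of the original source):
	 "ldr x0, =0x1122334455667788" places the 64-bit constant in a nearby
	 literal pool below and the instruction becomes a PC-relative load of
	 that pool entry.  */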
7699       if (instr->gen_lit_pool
7700 	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7701 	{
7702 	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7703 	  if (op == OP_LDRSW_LIT)
7704 	    size = 4;
7705 	  if (instr->reloc.exp.X_op != O_constant
7706 	      && instr->reloc.exp.X_op != O_big
7707 	      && instr->reloc.exp.X_op != O_symbol)
7708 	    {
7709 	      record_operand_error (opcode, 1,
7710 				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7711 				    _("constant expression expected"));
7712 	      return false;
7713 	    }
7714 	  if (! add_to_lit_pool (&instr->reloc.exp, size))
7715 	    {
7716 	      record_operand_error (opcode, 1,
7717 				    AARCH64_OPDE_OTHER_ERROR,
7718 				    _("literal pool insertion failed"));
7719 	      return false;
7720 	    }
7721 	}
7722       break;
7723     case log_shift:
7724     case bitfield:
7725       /* UXT[BHW] Wd, Wn
7726 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7727 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7728 	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7729 	 A programmer-friendly assembler should accept a destination Xd in
7730 	 place of Wd, however that is not the preferred form for disassembly.
7731 	 */
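      /* Illustrative example (added note, not part of the original source):
	 "uxtb x0, w1" is accepted here and treated as "uxtb w0, w1", the
	 preferred form for disassembly.  */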
7732       if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7733 	  && operands[1].qualifier == AARCH64_OPND_QLF_W
7734 	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
7735 	operands[0].qualifier = AARCH64_OPND_QLF_W;
7736       break;
7737 
7738     case addsub_ext:
7739 	{
7740 	  /* In the 64-bit form, the final register operand is written as Wm
7741 	     for all but the (possibly omitted) UXTX/LSL and SXTX
7742 	     operators.
7743 	     As a programmer-friendly assembler, we accept e.g.
7744 	     ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7745 	     ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
7746 	  int idx = aarch64_operand_index (opcode->operands,
7747 					   AARCH64_OPND_Rm_EXT);
7748 	  gas_assert (idx == 1 || idx == 2);
7749 	  if (operands[0].qualifier == AARCH64_OPND_QLF_X
7750 	      && operands[idx].qualifier == AARCH64_OPND_QLF_X
7751 	      && operands[idx].shifter.kind != AARCH64_MOD_LSL
7752 	      && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7753 	      && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7754 	    operands[idx].qualifier = AARCH64_OPND_QLF_W;
7755 	}
7756       break;
7757 
7758     default:
7759       break;
7760     }
7761 
7762   DEBUG_TRACE ("exit with SUCCESS");
7763   return true;
7764 }
7765 
7766 /* Check for loads and stores that will cause unpredictable behavior.  */
7767 
7768 static void
7769 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7770 {
7771   aarch64_inst *base = &instr->base;
7772   const aarch64_opcode *opcode = base->opcode;
7773   const aarch64_opnd_info *opnds = base->operands;
7774   switch (opcode->iclass)
7775     {
7776     case ldst_pos:
7777     case ldst_imm9:
7778     case ldst_imm10:
7779     case ldst_unscaled:
7780     case ldst_unpriv:
7781       /* Loading/storing the base register is unpredictable if writeback.  */
7782       if ((aarch64_get_operand_class (opnds[0].type)
7783 	   == AARCH64_OPND_CLASS_INT_REG)
7784 	  && opnds[0].reg.regno == opnds[1].addr.base_regno
7785 	  && opnds[1].addr.base_regno != REG_SP
7786 	  /* Exempt STG/STZG/ST2G/STZ2G.  */
7787 	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7788 	  && opnds[1].addr.writeback)
7789 	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7790       break;
7791 
7792     case ldstpair_off:
7793     case ldstnapair_offs:
7794     case ldstpair_indexed:
7795       /* Loading/storing the base register is unpredictable if writeback.  */
7796       if ((aarch64_get_operand_class (opnds[0].type)
7797 	   == AARCH64_OPND_CLASS_INT_REG)
7798 	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
7799 	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
7800 	  && opnds[2].addr.base_regno != REG_SP
7801 	  /* Exempt STGP.  */
7802 	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7803 	  && opnds[2].addr.writeback)
7804 	    as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7805       /* Load operations must load different registers.  */
7806       if ((opcode->opcode & (1 << 22))
7807 	  && opnds[0].reg.regno == opnds[1].reg.regno)
7808 	    as_warn (_("unpredictable load of register pair -- `%s'"), str);
7809       break;
7810 
7811     case ldstexcl:
7812       if ((aarch64_get_operand_class (opnds[0].type)
7813 	   == AARCH64_OPND_CLASS_INT_REG)
7814 	  && (aarch64_get_operand_class (opnds[1].type)
7815 	      == AARCH64_OPND_CLASS_INT_REG))
7816 	{
7817           if ((opcode->opcode & (1 << 22)))
7818 	    {
7819 	      /* A load-exclusive pair is unpredictable if Rt == Rt2.  */
7820 	      if ((opcode->opcode & (1 << 21))
7821 		  && opnds[0].reg.regno == opnds[1].reg.regno)
7822 		as_warn (_("unpredictable load of register pair -- `%s'"), str);
7823 	    }
7824 	  else
7825 	    {
7826 	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
7827 	      if (opnds[0].reg.regno == opnds[1].reg.regno)
7828 		as_warn
7829 		  (_("unpredictable: identical transfer and status registers"
7830 		     " --`%s'"),str);
7831 
7832 	      if (opnds[0].reg.regno == opnds[2].reg.regno)
7833 		{
7834 		  if (!(opcode->opcode & (1 << 21)))
7835 	            /*  Store-Exclusive is unpredictable if Rn == Rs.  */
7836 		    as_warn
7837 		      (_("unpredictable: identical base and status registers"
7838 			 " --`%s'"),str);
7839 		  else
7840 	            /*  Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
7841 		    as_warn
7842 		      (_("unpredictable: "
7843 			 "identical transfer and status registers"
7844 			 " --`%s'"),str);
7845 		}
7846 
7847 	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
7848 	      if ((opcode->opcode & (1 << 21))
7849 		  && opnds[0].reg.regno == opnds[3].reg.regno
7850 		  && opnds[3].reg.regno != REG_SP)
7851 		as_warn (_("unpredictable: identical base and status registers"
7852 			   " --`%s'"),str);
7853 	    }
7854 	}
7855       break;
7856 
7857     default:
7858       break;
7859     }
7860 }
7861 
7862 static void
7863 force_automatic_sequence_close (void)
7864 {
7865   struct aarch64_segment_info_type *tc_seg_info;
7866 
7867   tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7868   if (tc_seg_info->insn_sequence.instr)
7869     {
7870       as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7871 		     _("previous `%s' sequence has not been closed"),
7872 		     tc_seg_info->insn_sequence.instr->opcode->name);
7873       init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7874     }
7875 }
7876 
7877 /* A wrapper function to interface with libopcodes on encoding and
7878    record the error message if there is any.
7879 
7880    Return TRUE on success; otherwise return FALSE.  */
7881 
7882 static bool
7883 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7884 	   aarch64_insn *code)
7885 {
7886   aarch64_operand_error error_info;
7887   memset (&error_info, '\0', sizeof (error_info));
7888   error_info.kind = AARCH64_OPDE_NIL;
7889   if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7890       && !error_info.non_fatal)
7891     return true;
7892 
7893   gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7894   record_operand_error_info (opcode, &error_info);
7895   return error_info.non_fatal;
7896 }
7897 
7898 #ifdef DEBUG_AARCH64
7899 static inline void
7900 dump_opcode_operands (const aarch64_opcode *opcode)
7901 {
7902   int i = 0;
7903   while (opcode->operands[i] != AARCH64_OPND_NIL)
7904     {
7905       aarch64_verbose ("\t\t opnd%d: %s", i,
7906 		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7907 		       ? aarch64_get_operand_name (opcode->operands[i])
7908 		       : aarch64_get_operand_desc (opcode->operands[i]));
7909       ++i;
7910     }
7911 }
7912 #endif /* DEBUG_AARCH64 */
7913 
7914 /* This is the guts of the machine-dependent assembler.  STR points to a
7915    machine dependent instruction.  This function is supposed to emit
7916    the frags/bytes it assembles to.  */
7917 
7918 void
7919 md_assemble (char *str)
7920 {
7921   templates *template;
7922   const aarch64_opcode *opcode;
7923   struct aarch64_segment_info_type *tc_seg_info;
7924   aarch64_inst *inst_base;
7925   unsigned saved_cond;
7926 
7927   /* Align the previous label if needed.  */
7928   if (last_label_seen != NULL)
7929     {
7930       symbol_set_frag (last_label_seen, frag_now);
7931       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7932       S_SET_SEGMENT (last_label_seen, now_seg);
7933     }
7934 
7935   /* Update the current insn_sequence from the segment.  */
7936   tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7937   insn_sequence = &tc_seg_info->insn_sequence;
7938   tc_seg_info->last_file = as_where (&tc_seg_info->last_line);
7939 
7940   inst.reloc.type = BFD_RELOC_UNUSED;
7941 
7942   DEBUG_TRACE ("\n\n");
7943   DEBUG_TRACE ("==============================");
7944   DEBUG_TRACE ("Enter md_assemble with %s", str);
7945 
7946   /* Scan up to the end of the mnemonic, which must end in whitespace,
7947      '.', or end of string.  */
7948   char *p = str;
7949   char *dot = 0;
7950   for (; is_part_of_name (*p); p++)
7951     if (*p == '.' && !dot)
7952       dot = p;
7953 
7954   if (p == str)
7955     {
7956       as_bad (_("unknown mnemonic -- `%s'"), str);
7957       return;
7958     }
7959 
7960   if (!dot && create_register_alias (str, p))
7961     return;
7962 
7963   template = opcode_lookup (str, dot, p);
7964   if (!template)
7965     {
7966       as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7967 	      str);
7968       return;
7969     }
7970 
7971   skip_whitespace (p);
7972   if (*p == ',')
7973     {
7974       as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7975 	      get_mnemonic_name (str), str);
7976       return;
7977     }
7978 
7979   init_operand_error_report ();
7980 
7981   /* Sections are assumed to start aligned.  In an executable section, there
7982      is no MAP_DATA symbol pending.  So we only align the address during a
7983      MAP_DATA --> MAP_INSN transition.
7984      For other sections, this is not guaranteed.  */
7985   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7986   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7987     frag_align_code (2, 0);
7988 
7989   saved_cond = inst.cond;
7990   reset_aarch64_instruction (&inst);
7991   inst.cond = saved_cond;
7992 
7993   /* Iterate through all opcode entries with the same mnemonic name.  */
7994   do
7995     {
7996       opcode = template->opcode;
7997 
7998       DEBUG_TRACE ("opcode %s found", opcode->name);
7999 #ifdef DEBUG_AARCH64
8000       if (debug_dump)
8001 	dump_opcode_operands (opcode);
8002 #endif /* DEBUG_AARCH64 */
8003 
8004       mapping_state (MAP_INSN);
8005 
8006       inst_base = &inst.base;
8007       inst_base->opcode = opcode;
8008 
8009       /* Truly conditionally executed instructions, e.g. b.cond.  */
8010       if (opcode->flags & F_COND)
8011 	{
8012 	  gas_assert (inst.cond != COND_ALWAYS);
8013 	  inst_base->cond = get_cond_from_value (inst.cond);
8014 	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
8015 	}
8016       else if (inst.cond != COND_ALWAYS)
8017 	{
8018 	  /* We shouldn't arrive here: the assembly looks like a
8019 	     conditional instruction but the opcode found is unconditional.  */
8020 	  gas_assert (0);
8021 	  continue;
8022 	}
8023 
8024       if (parse_operands (p, opcode)
8025 	  && programmer_friendly_fixup (&inst)
8026 	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
8027 	{
8028 	  /* Check that this instruction is supported for this CPU.  */
8029 	  if (!opcode->avariant
8030 	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
8031 	    {
8032 	      as_bad (_("selected processor does not support `%s'"), str);
8033 	      return;
8034 	    }
8035 
8036 	  warn_unpredictable_ldst (&inst, str);
8037 
8038 	  if (inst.reloc.type == BFD_RELOC_UNUSED
8039 	      || !inst.reloc.need_libopcodes_p)
8040 	    output_inst (NULL);
8041 	  else
8042 	    {
8043 	      /* If there is relocation generated for the instruction,
8044 	         store the instruction information for the future fix-up.  */
8045 	      struct aarch64_inst *copy;
8046 	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
8047 	      copy = XNEW (struct aarch64_inst);
8048 	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
8049 	      output_inst (copy);
8050 	    }
8051 
8052 	  /* Issue non-fatal messages if any.  */
8053 	  output_operand_error_report (str, true);
8054 	  return;
8055 	}
8056 
8057       template = template->next;
8058       if (template != NULL)
8059 	{
8060 	  reset_aarch64_instruction (&inst);
8061 	  inst.cond = saved_cond;
8062 	}
8063     }
8064   while (template != NULL);
8065 
8066   /* Issue the error messages if any.  */
8067   output_operand_error_report (str, false);
8068 }
8069 
8070 /* Various frobbings of labels and their addresses.  */
8071 
8072 void
8073 aarch64_start_line_hook (void)
8074 {
8075   last_label_seen = NULL;
8076 }
8077 
8078 void
8079 aarch64_frob_label (symbolS * sym)
8080 {
8081   last_label_seen = sym;
8082 
8083   dwarf2_emit_label (sym);
8084 }
8085 
8086 void
8087 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
8088 {
8089   /* Check to see if we have a block to close.  */
8090   force_automatic_sequence_close ();
8091 }
8092 
8093 int
8094 aarch64_data_in_code (void)
8095 {
8096   if (startswith (input_line_pointer + 1, "data:"))
8097     {
8098       *input_line_pointer = '/';
8099       input_line_pointer += 5;
8100       *input_line_pointer = 0;
8101       return 1;
8102     }
8103 
8104   return 0;
8105 }
8106 
8107 char *
8108 aarch64_canonicalize_symbol_name (char *name)
8109 {
8110   int len;
8111 
8112   if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
8113     *(name + len - 5) = 0;
8114 
8115   return name;
8116 }
8117 
8118 /* Table of all register names defined by default.  The user can
8119    define additional names with .req.  Note that all register names
8120    should appear in both upper and lowercase variants.	Some registers
8121    also have mixed-case names.	*/
8122 
8123 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8124 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8125 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8126 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8127 #define REGSET16(p,t) \
8128   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8129   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8130   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8131   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8132 #define REGSET16S(p,s,t) \
8133   REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8134   REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8135   REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8136   REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8137 #define REGSET31(p,t) \
8138   REGSET16(p, t), \
8139   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8140   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8141   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8142   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8143 #define REGSET(p,t) \
8144   REGSET31(p,t), REGNUM(p,31,t)
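/* Illustrative expansion (added note, not part of the original source):
   REGNUM (x, 0, R_64) expands to REGDEF (x0, 0, R_64), i.e. the entry
   { "x0", 0, REG_TYPE_R_64, true }.  */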
8145 
8146 /* These go into aarch64_reg_hsh hash-table.  */
8147 static const reg_entry reg_names[] = {
8148   /* Integer registers.  */
8149   REGSET31 (x, R_64), REGSET31 (X, R_64),
8150   REGSET31 (w, R_32), REGSET31 (W, R_32),
8151 
8152   REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8153   REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8154   REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8155   REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8156   REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8157   REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8158 
8159   REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8160   REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8161 
8162   /* Floating-point single precision registers.  */
8163   REGSET (s, FP_S), REGSET (S, FP_S),
8164 
8165   /* Floating-point double precision registers.  */
8166   REGSET (d, FP_D), REGSET (D, FP_D),
8167 
8168   /* Floating-point half precision registers.  */
8169   REGSET (h, FP_H), REGSET (H, FP_H),
8170 
8171   /* Floating-point byte precision registers.  */
8172   REGSET (b, FP_B), REGSET (B, FP_B),
8173 
8174   /* Floating-point quad precision registers.  */
8175   REGSET (q, FP_Q), REGSET (Q, FP_Q),
8176 
8177   /* FP/SIMD registers.  */
8178   REGSET (v, VN), REGSET (V, VN),
8179 
8180   /* SVE vector registers.  */
8181   REGSET (z, ZN), REGSET (Z, ZN),
8182 
8183   /* SVE predicate registers.  */
8184   REGSET16 (p, PN), REGSET16 (P, PN),
8185 
8186   /* SME ZA tile registers.  */
8187   REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8188 
8189   /* SME ZA tile registers (horizontal slice).  */
8190   REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8191 
8192   /* SME ZA tile registers (vertical slice).  */
8193   REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8194 };
8195 
8196 #undef REGDEF
8197 #undef REGDEF_ALIAS
8198 #undef REGNUM
8199 #undef REGSET16
8200 #undef REGSET31
8201 #undef REGSET
8202 
8203 #define N 1
8204 #define n 0
8205 #define Z 1
8206 #define z 0
8207 #define C 1
8208 #define c 0
8209 #define V 1
8210 #define v 0
8211 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
8212 static const asm_nzcv nzcv_names[] = {
8213   {"nzcv", B (n, z, c, v)},
8214   {"nzcV", B (n, z, c, V)},
8215   {"nzCv", B (n, z, C, v)},
8216   {"nzCV", B (n, z, C, V)},
8217   {"nZcv", B (n, Z, c, v)},
8218   {"nZcV", B (n, Z, c, V)},
8219   {"nZCv", B (n, Z, C, v)},
8220   {"nZCV", B (n, Z, C, V)},
8221   {"Nzcv", B (N, z, c, v)},
8222   {"NzcV", B (N, z, c, V)},
8223   {"NzCv", B (N, z, C, v)},
8224   {"NzCV", B (N, z, C, V)},
8225   {"NZcv", B (N, Z, c, v)},
8226   {"NZcV", B (N, Z, c, V)},
8227   {"NZCv", B (N, Z, C, v)},
8228   {"NZCV", B (N, Z, C, V)}
8229 };
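/* Illustrative note (added, not part of the original source): a flags operand
   written as "nZCv" is looked up in this table (presumably via
   aarch64_nzcv_hsh) and yields B (n, Z, C, v) == 0b0110 == 6, the same
   encoding as writing the immediate #6 directly.  */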
8230 
8231 #undef N
8232 #undef n
8233 #undef Z
8234 #undef z
8235 #undef C
8236 #undef c
8237 #undef V
8238 #undef v
8239 #undef B
8240 
8241 /* MD interface: bits in the object file.  */
8242 
8243 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8244    for use in the a.out file, and store them in the array pointed to by buf.
8245    This knows about the endian-ness of the target machine and does
8246    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
8247    2 (short) and 4 (long).  Floating-point numbers are put out as a series
8248    of LITTLENUMS (shorts, here at least).	*/
8249 
8250 void
8251 md_number_to_chars (char *buf, valueT val, int n)
8252 {
8253   if (target_big_endian)
8254     number_to_chars_bigendian (buf, val, n);
8255   else
8256     number_to_chars_littleendian (buf, val, n);
8257 }
8258 
8259 /* MD interface: Sections.  */
8260 
8261 /* Estimate the size of a frag before relaxing.  Assume everything fits in
8262    4 bytes.  */
8263 
8264 int
8265 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8266 {
8267   fragp->fr_var = 4;
8268   return 4;
8269 }
8270 
8271 /* Round up a section size to the appropriate boundary.	 */
8272 
8273 valueT
8274 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8275 {
8276   return size;
8277 }
8278 
8279 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
8280    of an rs_align_code fragment.
8281 
8282    Here we fill the frag with the appropriate info for padding the
8283    output stream.  The resulting frag will consist of a fixed (fr_fix)
8284    and of a repeating (fr_var) part.
8285 
8286    The fixed content is always emitted before the repeating content and
8287    these two parts are used as follows in constructing the output:
8288    - the fixed part will be used to align to a valid instruction word
8289      boundary, in case that we start at a misaligned address; as no
8290      executable instruction can live at the misaligned location, we
8291      simply fill with zeros;
8292    - the variable part will be used to cover the remaining padding and
8293      we fill using the AArch64 NOP instruction.
8294 
8295    Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8296    enough storage space: up to 3 bytes of padding back to a valid
8297    instruction alignment and exactly 4 bytes to store the NOP pattern.  */
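/* Worked example (added note, not part of the original source): if 14 bytes
   of padding are needed at an address that is 2 bytes past a word boundary,
   the fixed part is 2 zero bytes (restoring 4-byte alignment) and the
   remaining 12 bytes are covered by repeating the 4-byte NOP pattern.  */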
8298 
8299 void
8300 aarch64_handle_align (fragS * fragP)
8301 {
8302   /* NOP = d503201f */
8303   /* AArch64 instructions are always little-endian.  */
8304   static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8305 
8306   int bytes, fix, noop_size;
8307   char *p;
8308 
8309   if (fragP->fr_type != rs_align_code)
8310     return;
8311 
8312   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8313   p = fragP->fr_literal + fragP->fr_fix;
8314 
8315 #ifdef OBJ_ELF
8316   gas_assert (fragP->tc_frag_data.recorded);
8317 #endif
8318 
8319   noop_size = sizeof (aarch64_noop);
8320 
8321   fix = bytes & (noop_size - 1);
8322   if (fix)
8323     {
8324 #ifdef OBJ_ELF
8325       insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8326 #endif
8327       memset (p, 0, fix);
8328       p += fix;
8329       fragP->fr_fix += fix;
8330     }
8331 
8332   if (noop_size)
8333     memcpy (p, aarch64_noop, noop_size);
8334   fragP->fr_var = noop_size;
8335 }
8336 
8337 /* Perform target specific initialisation of a frag.
8338    Note - despite the name this initialisation is not done when the frag
8339    is created, but only when its type is assigned.  A frag can be created
8340    and used a long time before its type is set, so beware of assuming that
8341    this initialisation is performed first.  */
8342 
8343 #ifndef OBJ_ELF
8344 void
8345 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
8346 		   int max_chars ATTRIBUTE_UNUSED)
8347 {
8348 }
8349 
8350 #else /* OBJ_ELF is defined.  */
8351 void
8352 aarch64_init_frag (fragS * fragP, int max_chars)
8353 {
8354   /* Record a mapping symbol for alignment frags.  We will delete this
8355      later if the alignment ends up empty.  */
8356   if (!fragP->tc_frag_data.recorded)
8357     fragP->tc_frag_data.recorded = 1;
8358 
8359   /* PR 21809: Do not set a mapping state for debug sections
8360      - it just confuses other tools.  */
8361   if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8362     return;
8363 
8364   switch (fragP->fr_type)
8365     {
8366     case rs_align_test:
8367     case rs_fill:
8368       mapping_state_2 (MAP_DATA, max_chars);
8369       break;
8370     case rs_align:
8371       /* PR 20364: We can get alignment frags in code sections,
8372 	 so do not just assume that we should use the MAP_DATA state.  */
8373       mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8374       break;
8375     case rs_align_code:
8376       mapping_state_2 (MAP_INSN, max_chars);
8377       break;
8378     default:
8379       break;
8380     }
8381 }
8382 
8383 /* Initialize the DWARF-2 unwind information for this procedure.  */
8384 
8385 void
8386 tc_aarch64_frame_initial_instructions (void)
8387 {
8388   cfi_add_CFA_def_cfa (REG_SP, 0);
8389 }
8390 #endif /* OBJ_ELF */
8391 
8392 /* Convert REGNAME to a DWARF-2 register number.  */
8393 
8394 int
8395 tc_aarch64_regname_to_dw2regnum (char *regname)
8396 {
8397   const reg_entry *reg = parse_reg (&regname);
8398   if (reg == NULL)
8399     return -1;
8400 
8401   switch (reg->type)
8402     {
8403     case REG_TYPE_SP_32:
8404     case REG_TYPE_SP_64:
8405     case REG_TYPE_R_32:
8406     case REG_TYPE_R_64:
8407       return reg->number;
8408 
8409     case REG_TYPE_FP_B:
8410     case REG_TYPE_FP_H:
8411     case REG_TYPE_FP_S:
8412     case REG_TYPE_FP_D:
8413     case REG_TYPE_FP_Q:
8414       return reg->number + 64;
8415 
8416     default:
8417       break;
8418     }
8419   return -1;
8420 }
8421 
8422 /* Implement DWARF2_ADDR_SIZE.  */
8423 
8424 int
8425 aarch64_dwarf2_addr_size (void)
8426 {
8427 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8428   if (ilp32_p)
8429     return 4;
8430 #endif
8431   return bfd_arch_bits_per_address (stdoutput) / 8;
8432 }
8433 
8434 /* MD interface: Symbol and relocation handling.  */
8435 
8436 /* Return the address within the segment that a PC-relative fixup is
8437    relative to.  For AArch64, PC-relative fixups applied to instructions
8438    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
8439 
8440 long
8441 md_pcrel_from_section (fixS * fixP, segT seg)
8442 {
8443   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8444 
8445   /* If this is pc-relative and we are going to emit a relocation
8446      then we just want to put out any pipeline compensation that the linker
8447      will need.  Otherwise we want to use the calculated base.  */
8448   if (fixP->fx_pcrel
8449       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8450 	  || aarch64_force_relocation (fixP)))
8451     base = 0;
8452 
8453   /* AArch64 should be consistent for all pc-relative relocations.  */
8454   return base + AARCH64_PCREL_OFFSET;
8455 }
8456 
8457 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
8458    Otherwise we have no need to provide default values for symbols.  */
8459 
8460 symbolS *
8461 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8462 {
8463 #ifdef OBJ_ELF
8464   if (name[0] == '_' && name[1] == 'G'
8465       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8466     {
8467       if (!GOT_symbol)
8468 	{
8469 	  if (symbol_find (name))
8470 	    as_bad (_("GOT already in the symbol table"));
8471 
8472 	  GOT_symbol = symbol_new (name, undefined_section,
8473 				   &zero_address_frag, 0);
8474 	}
8475 
8476       return GOT_symbol;
8477     }
8478 #endif
8479 
8480   return 0;
8481 }
8482 
8483 /* Return non-zero if the indicated VALUE has overflowed the maximum
8484    range expressible by an unsigned number with the indicated number of
8485    BITS.  */
8486 
8487 static bool
8488 unsigned_overflow (valueT value, unsigned bits)
8489 {
8490   valueT lim;
8491   if (bits >= sizeof (valueT) * 8)
8492     return false;
8493   lim = (valueT) 1 << bits;
8494   return (value >= lim);
8495 }
8496 
8497 
8498 /* Return non-zero if the indicated VALUE has overflowed the maximum
8499    range expressible by a signed number with the indicated number of
8500    BITS.  */
8501 
8502 static bool
8503 signed_overflow (offsetT value, unsigned bits)
8504 {
8505   offsetT lim;
8506   if (bits >= sizeof (offsetT) * 8)
8507     return false;
8508   lim = (offsetT) 1 << (bits - 1);
8509   return (value < -lim || value >= lim);
8510 }
8511 
8512 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8513    unsigned immediate offset load/store instruction, try to encode it as
8514    an unscaled, 9-bit, signed immediate offset load/store instruction.
8515    Return TRUE if it is successful; otherwise return FALSE.
8516 
8517    As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8518    in response to the standard LDR/STR mnemonics when the immediate offset is
8519    unambiguous, i.e. when it is negative or unaligned.  */
8520 
8521 static bool
8522 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8523 {
8524   int idx;
8525   enum aarch64_op new_op;
8526   const aarch64_opcode *new_opcode;
8527 
8528   gas_assert (instr->opcode->iclass == ldst_pos);
8529 
8530   switch (instr->opcode->op)
8531     {
8532     case OP_LDRB_POS: new_op = OP_LDURB; break;
8533     case OP_STRB_POS: new_op = OP_STURB; break;
8534     case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8535     case OP_LDRH_POS: new_op = OP_LDURH; break;
8536     case OP_STRH_POS: new_op = OP_STURH; break;
8537     case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8538     case OP_LDR_POS: new_op = OP_LDUR; break;
8539     case OP_STR_POS: new_op = OP_STUR; break;
8540     case OP_LDRF_POS: new_op = OP_LDURV; break;
8541     case OP_STRF_POS: new_op = OP_STURV; break;
8542     case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8543     case OP_PRFM_POS: new_op = OP_PRFUM; break;
8544     default: new_op = OP_NIL; break;
8545     }
8546 
8547   if (new_op == OP_NIL)
8548     return false;
8549 
8550   new_opcode = aarch64_get_opcode (new_op);
8551   gas_assert (new_opcode != NULL);
8552 
8553   DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8554 	       instr->opcode->op, new_opcode->op);
8555 
8556   aarch64_replace_opcode (instr, new_opcode);
8557 
8558   /* Clear up the ADDR_SIMM9's qualifier; otherwise the
8559      qualifier matching may fail because the out-of-date qualifier will
8560      prevent the operand from being updated with a new and correct qualifier.  */
8561   idx = aarch64_operand_index (instr->opcode->operands,
8562 			       AARCH64_OPND_ADDR_SIMM9);
8563   gas_assert (idx == 1);
8564   instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8565 
8566   DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8567 
8568   if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8569 			      insn_sequence))
8570     return false;
8571 
8572   return true;
8573 }
8574 
8575 /* Called by fix_insn to fix a MOV immediate alias instruction.
8576 
8577    Operand for a generic move immediate instruction, which is an alias
8578    instruction that generates a single MOVZ, MOVN or ORR instruction to load
8579    a 32-bit/64-bit immediate value into a general register.  An assembler error
8580    shall result if the immediate cannot be created by a single one of these
8581    instructions.  If there is a choice, then to ensure reversibility an
8582    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
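/* For example (illustrative): "mov x0, #0x1234" is encoded as a MOVZ,
   "mov x0, #-2" as a MOVN (of the inverted value #0x1), and a repeating
   pattern such as "mov x0, #0xff00ff00ff00ff00" can only be encoded as an
   ORR with a bitmask immediate.  */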
8583 
8584 static void
8585 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8586 {
8587   const aarch64_opcode *opcode;
8588 
8589   /* Need to check if the destination is SP/ZR.  The check has to be done
8590      before any aarch64_replace_opcode.  */
8591   int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8592   int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8593 
8594   instr->operands[1].imm.value = value;
8595   instr->operands[1].skip = 0;
8596 
8597   if (try_mov_wide_p)
8598     {
8599       /* Try the MOVZ alias.  */
8600       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8601       aarch64_replace_opcode (instr, opcode);
8602       if (aarch64_opcode_encode (instr->opcode, instr,
8603 				 &instr->value, NULL, NULL, insn_sequence))
8604 	{
8605 	  put_aarch64_insn (buf, instr->value);
8606 	  return;
8607 	}
8608       /* Try the MOVK alias.  */
8609       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8610       aarch64_replace_opcode (instr, opcode);
8611       if (aarch64_opcode_encode (instr->opcode, instr,
8612 				 &instr->value, NULL, NULL, insn_sequence))
8613 	{
8614 	  put_aarch64_insn (buf, instr->value);
8615 	  return;
8616 	}
8617     }
8618 
8619   if (try_mov_bitmask_p)
8620     {
8621       /* Try the ORR alias.  */
8622       opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8623       aarch64_replace_opcode (instr, opcode);
8624       if (aarch64_opcode_encode (instr->opcode, instr,
8625 				 &instr->value, NULL, NULL, insn_sequence))
8626 	{
8627 	  put_aarch64_insn (buf, instr->value);
8628 	  return;
8629 	}
8630     }
8631 
8632   as_bad_where (fixP->fx_file, fixP->fx_line,
8633 		_("immediate cannot be moved by a single instruction"));
8634 }
8635 
8636 /* An instruction operand which is immediate-related may have a symbol used
8637    in the assembly, e.g.
8638 
8639      mov     w0, u32
8640      .set    u32,    0x00ffff00
8641 
8642    At the time when the assembly instruction is parsed, a referenced symbol,
8643    like 'u32' in the above example, may not have been seen; a fixS is created
8644    in such a case and is handled here after symbols have been resolved.
8645    The instruction is fixed up with VALUE using the information in *FIXP plus
8646    extra information in FLAGS.
8647 
8648    This function is called by md_apply_fix to fix up instructions that need
8649    a fix-up described above but do not involve any linker-time relocation.  */
8650 
8651 static void
8652 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8653 {
8654   int idx;
8655   uint32_t insn;
8656   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8657   enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8658   aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8659 
8660   if (new_inst)
8661     {
8662       /* Now the instruction is about to be fixed-up, so the operand that
8663 	 was previously marked as 'ignored' needs to be unmarked in order
8664 	 to get the encoding done properly.  */
8665       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8666       new_inst->operands[idx].skip = 0;
8667     }
8668 
8669   gas_assert (opnd != AARCH64_OPND_NIL);
8670 
8671   switch (opnd)
8672     {
8673     case AARCH64_OPND_EXCEPTION:
8674     case AARCH64_OPND_UNDEFINED:
8675       if (unsigned_overflow (value, 16))
8676 	as_bad_where (fixP->fx_file, fixP->fx_line,
8677 		      _("immediate out of range"));
8678       insn = get_aarch64_insn (buf);
8679       insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8680       put_aarch64_insn (buf, insn);
8681       break;
8682 
8683     case AARCH64_OPND_AIMM:
8684       /* ADD or SUB with immediate.
8685 	 NOTE this assumes we come here with an add/sub shifted reg encoding
8686 		  3  322|2222|2  2  2 21111 111111
8687 		  1  098|7654|3  2  1 09876 543210 98765 43210
8688 	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
8689 	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
8690 	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
8691 	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
8692 	 ->
8693 		  3  322|2222|2 2   221111111111
8694 		  1  098|7654|3 2   109876543210 98765 43210
8695 	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
8696 	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
8697 	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
8698 	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
8699 	 Fields sf Rn Rd are already set.  */
8700       insn = get_aarch64_insn (buf);
8701       if (value < 0)
8702 	{
8703 	  /* Add <-> sub.  */
8704 	  insn = reencode_addsub_switch_add_sub (insn);
8705 	  value = -value;
8706 	}
8707 
8708       if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8709 	  && unsigned_overflow (value, 12))
8710 	{
8711 	  /* Try to shift the value by 12 to make it fit.  */
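	  /* E.g. (illustrative) an out-of-range immediate of 0x555000 can be
	     encoded as #0x555 with the shift amount set to LSL #12.  */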
8712 	  if (((value >> 12) << 12) == value
8713 	      && ! unsigned_overflow (value, 12 + 12))
8714 	    {
8715 	      value >>= 12;
8716 	      insn |= encode_addsub_imm_shift_amount (1);
8717 	    }
8718 	}
8719 
8720       if (unsigned_overflow (value, 12))
8721 	as_bad_where (fixP->fx_file, fixP->fx_line,
8722 		      _("immediate out of range"));
8723 
8724       insn |= encode_addsub_imm (value);
8725 
8726       put_aarch64_insn (buf, insn);
8727       break;
8728 
8729     case AARCH64_OPND_SIMD_IMM:
8730     case AARCH64_OPND_SIMD_IMM_SFT:
8731     case AARCH64_OPND_LIMM:
8732       /* Bit mask immediate.  */
8733       gas_assert (new_inst != NULL);
8734       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8735       new_inst->operands[idx].imm.value = value;
8736       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8737 				 &new_inst->value, NULL, NULL, insn_sequence))
8738 	put_aarch64_insn (buf, new_inst->value);
8739       else
8740 	as_bad_where (fixP->fx_file, fixP->fx_line,
8741 		      _("invalid immediate"));
8742       break;
8743 
8744     case AARCH64_OPND_HALF:
8745       /* 16-bit unsigned immediate.  */
8746       if (unsigned_overflow (value, 16))
8747 	as_bad_where (fixP->fx_file, fixP->fx_line,
8748 		      _("immediate out of range"));
8749       insn = get_aarch64_insn (buf);
8750       insn |= encode_movw_imm (value & 0xffff);
8751       put_aarch64_insn (buf, insn);
8752       break;
8753 
8754     case AARCH64_OPND_IMM_MOV:
8755       /* Operand for a generic move immediate instruction, which is
8756 	 an alias instruction that generates a single MOVZ, MOVN or ORR
8757 	 instruction to load a 32-bit/64-bit immediate value into a general
8758 	 register.  An assembler error shall result if the immediate cannot be
8759 	 created by a single one of these instructions.  If there is a choice,
8760 	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
8761 	 and MOVZ or MOVN to ORR.  */
8762       gas_assert (new_inst != NULL);
8763       fix_mov_imm_insn (fixP, buf, new_inst, value);
8764       break;
8765 
8766     case AARCH64_OPND_ADDR_SIMM7:
8767     case AARCH64_OPND_ADDR_SIMM9:
8768     case AARCH64_OPND_ADDR_SIMM9_2:
8769     case AARCH64_OPND_ADDR_SIMM10:
8770     case AARCH64_OPND_ADDR_UIMM12:
8771     case AARCH64_OPND_ADDR_SIMM11:
8772     case AARCH64_OPND_ADDR_SIMM13:
8773       /* Immediate offset in an address.  */
8774       insn = get_aarch64_insn (buf);
8775 
8776       gas_assert (new_inst != NULL && new_inst->value == insn);
8777       gas_assert (new_inst->opcode->operands[1] == opnd
8778 		  || new_inst->opcode->operands[2] == opnd);
8779 
8780       /* Get the index of the address operand.  */
8781       if (new_inst->opcode->operands[1] == opnd)
8782 	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
8783 	idx = 1;
8784       else
8785 	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
8786 	idx = 2;
8787 
8788       /* Update the resolved offset value.  */
8789       new_inst->operands[idx].addr.offset.imm = value;
8790 
8791       /* Encode/fix-up.  */
8792       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8793 				 &new_inst->value, NULL, NULL, insn_sequence))
8794 	{
8795 	  put_aarch64_insn (buf, new_inst->value);
8796 	  break;
8797 	}
8798       else if (new_inst->opcode->iclass == ldst_pos
8799 	       && try_to_encode_as_unscaled_ldst (new_inst))
8800 	{
8801 	  put_aarch64_insn (buf, new_inst->value);
8802 	  break;
8803 	}
8804 
8805       as_bad_where (fixP->fx_file, fixP->fx_line,
8806 		    _("immediate offset out of range"));
8807       break;
8808 
8809     default:
8810       gas_assert (0);
8811       as_fatal (_("unhandled operand code %d"), opnd);
8812     }
8813 }
8814 
8815 /* Apply a fixup (fixP) to segment data, once it has been determined
8816    by our caller that we have all the info we need to fix it up.
8817 
8818    Parameter valP is the pointer to the value of the bits.  */
8819 
8820 void
8821 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8822 {
8823   offsetT value = *valP;
8824   uint32_t insn;
8825   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8826   int scale;
8827   unsigned flags = fixP->fx_addnumber;
8828 
8829   DEBUG_TRACE ("\n\n");
8830   DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8831   DEBUG_TRACE ("Enter md_apply_fix");
8832 
8833   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8834 
8835   /* Note whether this will delete the relocation.  */
8836 
8837   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8838     fixP->fx_done = 1;
8839 
8840   /* Process the relocations.  */
8841   switch (fixP->fx_r_type)
8842     {
8843     case BFD_RELOC_NONE:
8844       /* This will need to go in the object file.  */
8845       fixP->fx_done = 0;
8846       break;
8847 
8848     case BFD_RELOC_8:
8849     case BFD_RELOC_8_PCREL:
8850       if (fixP->fx_done || !seg->use_rela_p)
8851 	md_number_to_chars (buf, value, 1);
8852       break;
8853 
8854     case BFD_RELOC_16:
8855     case BFD_RELOC_16_PCREL:
8856       if (fixP->fx_done || !seg->use_rela_p)
8857 	md_number_to_chars (buf, value, 2);
8858       break;
8859 
8860     case BFD_RELOC_32:
8861     case BFD_RELOC_32_PCREL:
8862       if (fixP->fx_done || !seg->use_rela_p)
8863 	md_number_to_chars (buf, value, 4);
8864       break;
8865 
8866     case BFD_RELOC_64:
8867     case BFD_RELOC_64_PCREL:
8868       if (fixP->fx_done || !seg->use_rela_p)
8869 	md_number_to_chars (buf, value, 8);
8870       break;
8871 
8872     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8873       /* We claim that these fixups have been processed here, even if
8874          in fact we generate an error because we do not have a reloc
8875          for them, so tc_gen_reloc() will reject them.  */
8876       fixP->fx_done = 1;
8877       if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8878 	{
8879 	  as_bad_where (fixP->fx_file, fixP->fx_line,
8880 			_("undefined symbol %s used as an immediate value"),
8881 			S_GET_NAME (fixP->fx_addsy));
8882 	  goto apply_fix_return;
8883 	}
8884       fix_insn (fixP, flags, value);
8885       break;
8886 
8887     case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8888       if (fixP->fx_done || !seg->use_rela_p)
8889 	{
8890 	  if (value & 3)
8891 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8892 			  _("pc-relative load offset not word aligned"));
8893 	  if (signed_overflow (value, 21))
8894 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8895 			  _("pc-relative load offset out of range"));
8896 	  insn = get_aarch64_insn (buf);
8897 	  insn |= encode_ld_lit_ofs_19 (value >> 2);
8898 	  put_aarch64_insn (buf, insn);
8899 	}
8900       break;
8901 
8902     case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8903       if (fixP->fx_done || !seg->use_rela_p)
8904 	{
8905 	  if (signed_overflow (value, 21))
8906 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8907 			  _("pc-relative address offset out of range"));
8908 	  insn = get_aarch64_insn (buf);
8909 	  insn |= encode_adr_imm (value);
8910 	  put_aarch64_insn (buf, insn);
8911 	}
8912       break;
8913 
8914     case BFD_RELOC_AARCH64_BRANCH19:
8915       if (fixP->fx_done || !seg->use_rela_p)
8916 	{
8917 	  if (value & 3)
8918 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8919 			  _("conditional branch target not word aligned"));
8920 	  if (signed_overflow (value, 21))
8921 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8922 			  _("conditional branch out of range"));
8923 	  insn = get_aarch64_insn (buf);
8924 	  insn |= encode_cond_branch_ofs_19 (value >> 2);
8925 	  put_aarch64_insn (buf, insn);
8926 	}
8927       break;
8928 
8929     case BFD_RELOC_AARCH64_TSTBR14:
8930       if (fixP->fx_done || !seg->use_rela_p)
8931 	{
8932 	  if (value & 3)
8933 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8934 			  _("conditional branch target not word aligned"));
8935 	  if (signed_overflow (value, 16))
8936 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8937 			  _("conditional branch out of range"));
8938 	  insn = get_aarch64_insn (buf);
8939 	  insn |= encode_tst_branch_ofs_14 (value >> 2);
8940 	  put_aarch64_insn (buf, insn);
8941 	}
8942       break;
8943 
8944     case BFD_RELOC_AARCH64_CALL26:
8945     case BFD_RELOC_AARCH64_JUMP26:
8946       if (fixP->fx_done || !seg->use_rela_p)
8947 	{
8948 	  if (value & 3)
8949 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8950 			  _("branch target not word aligned"));
8951 	  if (signed_overflow (value, 28))
8952 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8953 			  _("branch out of range"));
8954 	  insn = get_aarch64_insn (buf);
8955 	  insn |= encode_branch_ofs_26 (value >> 2);
8956 	  put_aarch64_insn (buf, insn);
8957 	}
8958       break;
8959 
8960     case BFD_RELOC_AARCH64_MOVW_G0:
8961     case BFD_RELOC_AARCH64_MOVW_G0_NC:
8962     case BFD_RELOC_AARCH64_MOVW_G0_S:
8963     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8964     case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8965     case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8966       scale = 0;
8967       goto movw_common;
8968     case BFD_RELOC_AARCH64_MOVW_G1:
8969     case BFD_RELOC_AARCH64_MOVW_G1_NC:
8970     case BFD_RELOC_AARCH64_MOVW_G1_S:
8971     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8972     case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8973     case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8974       scale = 16;
8975       goto movw_common;
8976     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8977       scale = 0;
8978       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8979       /* Should always be exported to object file, see
8980 	 aarch64_force_relocation().  */
8981       gas_assert (!fixP->fx_done);
8982       gas_assert (seg->use_rela_p);
8983       goto movw_common;
8984     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8985       scale = 16;
8986       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8987       /* Should always be exported to object file, see
8988 	 aarch64_force_relocation().  */
8989       gas_assert (!fixP->fx_done);
8990       gas_assert (seg->use_rela_p);
8991       goto movw_common;
8992     case BFD_RELOC_AARCH64_MOVW_G2:
8993     case BFD_RELOC_AARCH64_MOVW_G2_NC:
8994     case BFD_RELOC_AARCH64_MOVW_G2_S:
8995     case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8996     case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8997       scale = 32;
8998       goto movw_common;
8999     case BFD_RELOC_AARCH64_MOVW_G3:
9000     case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9001       scale = 48;
9002     movw_common:
9003       if (fixP->fx_done || !seg->use_rela_p)
9004 	{
9005 	  insn = get_aarch64_insn (buf);
9006 
9007 	  if (!fixP->fx_done)
9008 	    {
9009 	      /* REL signed addend must fit in 16 bits */
9010 	      if (signed_overflow (value, 16))
9011 		as_bad_where (fixP->fx_file, fixP->fx_line,
9012 			      _("offset out of range"));
9013 	    }
9014 	  else
9015 	    {
9016 	      /* Check for overflow and scale. */
9017 	      switch (fixP->fx_r_type)
9018 		{
9019 		case BFD_RELOC_AARCH64_MOVW_G0:
9020 		case BFD_RELOC_AARCH64_MOVW_G1:
9021 		case BFD_RELOC_AARCH64_MOVW_G2:
9022 		case BFD_RELOC_AARCH64_MOVW_G3:
9023 		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9024 		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9025 		  if (unsigned_overflow (value, scale + 16))
9026 		    as_bad_where (fixP->fx_file, fixP->fx_line,
9027 				  _("unsigned value out of range"));
9028 		  break;
9029 		case BFD_RELOC_AARCH64_MOVW_G0_S:
9030 		case BFD_RELOC_AARCH64_MOVW_G1_S:
9031 		case BFD_RELOC_AARCH64_MOVW_G2_S:
9032 		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9033 		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9034 		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9035 		  /* NOTE: We can only come here with movz or movn. */
9036 		  if (signed_overflow (value, scale + 16))
9037 		    as_bad_where (fixP->fx_file, fixP->fx_line,
9038 				  _("signed value out of range"));
9039 		  if (value < 0)
9040 		    {
9041 		      /* Force use of MOVN.  */
9042 		      value = ~value;
9043 		      insn = reencode_movzn_to_movn (insn);
9044 		    }
9045 		  else
9046 		    {
9047 		      /* Force use of MOVZ.  */
9048 		      insn = reencode_movzn_to_movz (insn);
9049 		    }
9050 		  break;
9051 		default:
9052 		  /* Unchecked relocations.  */
9053 		  break;
9054 		}
9055 	      value >>= scale;
9056 	    }
9057 
9058 	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
9059 	  insn |= encode_movw_imm (value & 0xffff);
9060 
9061 	  put_aarch64_insn (buf, insn);
9062 	}
9063       break;
9064 
9065     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9066       fixP->fx_r_type = (ilp32_p
9067 			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9068 			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9069       S_SET_THREAD_LOCAL (fixP->fx_addsy);
9070       /* Should always be exported to object file, see
9071 	 aarch64_force_relocation().  */
9072       gas_assert (!fixP->fx_done);
9073       gas_assert (seg->use_rela_p);
9074       break;
9075 
9076     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9077       fixP->fx_r_type = (ilp32_p
9078 			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9079 			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9080       S_SET_THREAD_LOCAL (fixP->fx_addsy);
9081       /* Should always be exported to object file, see
9082 	 aarch64_force_relocation().  */
9083       gas_assert (!fixP->fx_done);
9084       gas_assert (seg->use_rela_p);
9085       break;
9086 
9087     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9088     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9089     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9090     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9091     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9092     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9093     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9094     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9095     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9096     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9097     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9098     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9099     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9100     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9101     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9102     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9103     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9104     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9105     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9106     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9107     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9108     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9109     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9110     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9111     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9112     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9113     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9114     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9115     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9116     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9117     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9118     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9119     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9120     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9121     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9122     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9123     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9124     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9125     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9126     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9127     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9128     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9129     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9130     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9131     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9132     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9133     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9134     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9135     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9136     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9137     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9138     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9139       S_SET_THREAD_LOCAL (fixP->fx_addsy);
9140       /* Should always be exported to object file, see
9141 	 aarch64_force_relocation().  */
9142       gas_assert (!fixP->fx_done);
9143       gas_assert (seg->use_rela_p);
9144       break;
9145 
9146     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9147       /* Should always be exported to object file, see
9148 	 aarch64_force_relocation().  */
9149       fixP->fx_r_type = (ilp32_p
9150 			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9151 			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9152       gas_assert (!fixP->fx_done);
9153       gas_assert (seg->use_rela_p);
9154       break;
9155 
9156     case BFD_RELOC_AARCH64_ADD_LO12:
9157     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9158     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9159     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9160     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9161     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9162     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9163     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9164     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9165     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9166     case BFD_RELOC_AARCH64_LDST128_LO12:
9167     case BFD_RELOC_AARCH64_LDST16_LO12:
9168     case BFD_RELOC_AARCH64_LDST32_LO12:
9169     case BFD_RELOC_AARCH64_LDST64_LO12:
9170     case BFD_RELOC_AARCH64_LDST8_LO12:
9171       /* Should always be exported to object file, see
9172 	 aarch64_force_relocation().  */
9173       gas_assert (!fixP->fx_done);
9174       gas_assert (seg->use_rela_p);
9175       break;
9176 
9177     case BFD_RELOC_AARCH64_TLSDESC_ADD:
9178     case BFD_RELOC_AARCH64_TLSDESC_CALL:
9179     case BFD_RELOC_AARCH64_TLSDESC_LDR:
9180       break;
9181 
9182     case BFD_RELOC_UNUSED:
9183       /* An error will already have been reported.  */
9184       break;
9185 
9186     default:
9187       as_bad_where (fixP->fx_file, fixP->fx_line,
9188 		    _("unexpected %s fixup"),
9189 		    bfd_get_reloc_code_name (fixP->fx_r_type));
9190       break;
9191     }
9192 
9193  apply_fix_return:
9194   /* Free the allocated struct aarch64_inst.
9195      N.B. currently only a very limited number of fix-up types actually use
9196      this field, so the impact on performance should be minimal.  */
9197   free (fixP->tc_fix_data.inst);
9198 
9199   return;
9200 }
9201 
9202 /* Translate internal representation of relocation info to BFD target
9203    format.  */
9204 
9205 arelent *
9206 tc_gen_reloc (asection * section, fixS * fixp)
9207 {
9208   arelent *reloc;
9209   bfd_reloc_code_real_type code;
9210 
9211   reloc = XNEW (arelent);
9212 
9213   reloc->sym_ptr_ptr = XNEW (asymbol *);
9214   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9215   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9216 
9217   if (fixp->fx_pcrel)
9218     {
9219       if (section->use_rela_p)
9220 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9221       else
9222 	fixp->fx_offset = reloc->address;
9223     }
9224   reloc->addend = fixp->fx_offset;
9225 
9226   code = fixp->fx_r_type;
9227   switch (code)
9228     {
9229     case BFD_RELOC_16:
9230       if (fixp->fx_pcrel)
9231 	code = BFD_RELOC_16_PCREL;
9232       break;
9233 
9234     case BFD_RELOC_32:
9235       if (fixp->fx_pcrel)
9236 	code = BFD_RELOC_32_PCREL;
9237       break;
9238 
9239     case BFD_RELOC_64:
9240       if (fixp->fx_pcrel)
9241 	code = BFD_RELOC_64_PCREL;
9242       break;
9243 
9244     default:
9245       break;
9246     }
9247 
9248   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9249   if (reloc->howto == NULL)
9250     {
9251       as_bad_where (fixp->fx_file, fixp->fx_line,
9252 		    _
9253 		    ("cannot represent %s relocation in this object file format"),
9254 		    bfd_get_reloc_code_name (code));
9255       return NULL;
9256     }
9257 
9258   return reloc;
9259 }
9260 
9261 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
9262 
9263 void
9264 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9265 {
9266   bfd_reloc_code_real_type type;
9267   int pcrel = 0;
9268 
9269   /* Pick a reloc.
9270      FIXME: @@ Should look at CPU word size.  */
9271   switch (size)
9272     {
9273     case 1:
9274       type = BFD_RELOC_8;
9275       break;
9276     case 2:
9277       type = BFD_RELOC_16;
9278       break;
9279     case 4:
9280       type = BFD_RELOC_32;
9281       break;
9282     case 8:
9283       type = BFD_RELOC_64;
9284       break;
9285     default:
9286       as_bad (_("cannot do %u-byte relocation"), size);
9287       type = BFD_RELOC_UNUSED;
9288       break;
9289     }
9290 
9291   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9292 }
9293 
9294 #ifdef OBJ_ELF
9295 
9296 /* Implement md_after_parse_args.  This is the earliest time we need to decide
9297    ABI.  If no -mabi is specified, the ABI will be decided by the target triplet.  */
9298 
9299 void
9300 aarch64_after_parse_args (void)
9301 {
9302   if (aarch64_abi != AARCH64_ABI_NONE)
9303     return;
9304 
9305   /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32.  */
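  /* E.g. (illustrative) a DEFAULT_ARCH of "aarch64:32" matches the comparison
     below and selects ILP32, while plain "aarch64" falls through to LP64.  */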
9306   if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9307     aarch64_abi = AARCH64_ABI_ILP32;
9308   else
9309     aarch64_abi = AARCH64_ABI_LP64;
9310 }
9311 
9312 const char *
9313 elf64_aarch64_target_format (void)
9314 {
9315 #ifdef TE_CLOUDABI
9316   /* FIXME: What to do for ilp32_p ?  */
9317   if (target_big_endian)
9318     return "elf64-bigaarch64-cloudabi";
9319   else
9320     return "elf64-littleaarch64-cloudabi";
9321 #else
9322   if (target_big_endian)
9323     return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9324   else
9325     return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9326 #endif
9327 }
9328 
9329 void
9330 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
9331 {
9332   elf_frob_symbol (symp, puntp);
9333 }
9334 #endif
9335 
9336 /* MD interface: Finalization.	*/
9337 
9338 /* A good place to do this, although this was probably not intended
9339    for this kind of use.  We need to dump the literal pool before
9340    references are made to a null symbol pointer.  */
9341 
9342 void
9343 aarch64_cleanup (void)
9344 {
9345   literal_pool *pool;
9346 
9347   for (pool = list_of_pools; pool; pool = pool->next)
9348     {
9349       /* Put it at the end of the relevant section.  */
9350       subseg_set (pool->section, pool->sub_section);
9351       s_ltorg (0);
9352     }
9353 }
9354 
9355 #ifdef OBJ_ELF
9356 /* Remove any excess mapping symbols generated for alignment frags in
9357    SEC.  We may have created a mapping symbol before a zero byte
9358    alignment; remove it if there's a mapping symbol after the
9359    alignment.  */
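/* (Background note: the mapping symbols referred to here are the ELF "$x"
   and "$d" markers that record where code and data regions start within a
   section.)  */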
9360 static void
9361 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9362 		       void *dummy ATTRIBUTE_UNUSED)
9363 {
9364   segment_info_type *seginfo = seg_info (sec);
9365   fragS *fragp;
9366 
9367   if (seginfo == NULL || seginfo->frchainP == NULL)
9368     return;
9369 
9370   for (fragp = seginfo->frchainP->frch_root;
9371        fragp != NULL; fragp = fragp->fr_next)
9372     {
9373       symbolS *sym = fragp->tc_frag_data.last_map;
9374       fragS *next = fragp->fr_next;
9375 
9376       /* Variable-sized frags have been converted to fixed size by
9377          this point.  But if this was variable-sized to start with,
9378          there will be a fixed-size frag after it.  So don't handle
9379          next == NULL.  */
9380       if (sym == NULL || next == NULL)
9381 	continue;
9382 
9383       if (S_GET_VALUE (sym) < next->fr_address)
9384 	/* Not at the end of this frag.  */
9385 	continue;
9386       know (S_GET_VALUE (sym) == next->fr_address);
9387 
9388       do
9389 	{
9390 	  if (next->tc_frag_data.first_map != NULL)
9391 	    {
9392 	      /* Next frag starts with a mapping symbol.  Discard this
9393 	         one.  */
9394 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9395 	      break;
9396 	    }
9397 
9398 	  if (next->fr_next == NULL)
9399 	    {
9400 	      /* This mapping symbol is at the end of the section.  Discard
9401 	         it.  */
9402 	      know (next->fr_fix == 0 && next->fr_var == 0);
9403 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9404 	      break;
9405 	    }
9406 
9407 	  /* As long as we have empty frags without any mapping symbols,
9408 	     keep looking.  */
9409 	  /* If the next frag is non-empty and does not start with a
9410 	     mapping symbol, then this mapping symbol is required.  */
9411 	  if (next->fr_address != next->fr_next->fr_address)
9412 	    break;
9413 
9414 	  next = next->fr_next;
9415 	}
9416       while (next != NULL);
9417     }
9418 }
9419 #endif
9420 
9421 /* Adjust the symbol table.  */
9422 
9423 void
9424 aarch64_adjust_symtab (void)
9425 {
9426 #ifdef OBJ_ELF
9427   /* Remove any overlapping mapping symbols generated by alignment frags.  */
9428   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
9429   /* Now do generic ELF adjustments.  */
9430   elf_adjust_symtab ();
9431 #endif
9432 }
9433 
9434 static void
9435 checked_hash_insert (htab_t table, const char *key, void *value)
9436 {
9437   str_hash_insert (table, key, value, 0);
9438 }
9439 
9440 static void
9441 sysreg_hash_insert (htab_t table, const char *key, void *value)
9442 {
9443   gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9444   checked_hash_insert (table, key, value);
9445 }
9446 
9447 static void
9448 fill_instruction_hash_table (void)
9449 {
9450   const aarch64_opcode *opcode = aarch64_opcode_table;
9451 
9452   while (opcode->name != NULL)
9453     {
9454       templates *templ, *new_templ;
9455       templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9456 
9457       new_templ = XNEW (templates);
9458       new_templ->opcode = opcode;
9459       new_templ->next = NULL;
9460 
9461       if (!templ)
9462 	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9463       else
9464 	{
9465 	  new_templ->next = templ->next;
9466 	  templ->next = new_templ;
9467 	}
9468       ++opcode;
9469     }
9470 }
9471 
9472 static inline void
9473 convert_to_upper (char *dst, const char *src, size_t num)
9474 {
9475   unsigned int i;
9476   for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9477     *dst = TOUPPER (*src);
9478   *dst = '\0';
9479 }
9480 
9481 /* Assume STR points to a lower-case string; allocate, convert and return
9482    the corresponding upper-case string.  */
9483 static inline const char*
9484 get_upper_str (const char *str)
9485 {
9486   char *ret;
9487   size_t len = strlen (str);
9488   ret = XNEWVEC (char, len + 1);
9489   convert_to_upper (ret, str, len);
9490   return ret;
9491 }
9492 
9493 /* MD interface: Initialization.  */
9494 
9495 void
9496 md_begin (void)
9497 {
9498   unsigned mach;
9499   unsigned int i;
9500 
9501   aarch64_ops_hsh = str_htab_create ();
9502   aarch64_cond_hsh = str_htab_create ();
9503   aarch64_shift_hsh = str_htab_create ();
9504   aarch64_sys_regs_hsh = str_htab_create ();
9505   aarch64_pstatefield_hsh = str_htab_create ();
9506   aarch64_sys_regs_ic_hsh = str_htab_create ();
9507   aarch64_sys_regs_dc_hsh = str_htab_create ();
9508   aarch64_sys_regs_at_hsh = str_htab_create ();
9509   aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9510   aarch64_sys_regs_sr_hsh = str_htab_create ();
9511   aarch64_reg_hsh = str_htab_create ();
9512   aarch64_barrier_opt_hsh = str_htab_create ();
9513   aarch64_nzcv_hsh = str_htab_create ();
9514   aarch64_pldop_hsh = str_htab_create ();
9515   aarch64_hint_opt_hsh = str_htab_create ();
9516 
9517   fill_instruction_hash_table ();
9518 
9519   for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9520     sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9521 			 (void *) (aarch64_sys_regs + i));
9522 
9523   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9524     sysreg_hash_insert (aarch64_pstatefield_hsh,
9525 			 aarch64_pstatefields[i].name,
9526 			 (void *) (aarch64_pstatefields + i));
9527 
9528   for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9529     sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9530 			 aarch64_sys_regs_ic[i].name,
9531 			 (void *) (aarch64_sys_regs_ic + i));
9532 
9533   for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9534     sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9535 			 aarch64_sys_regs_dc[i].name,
9536 			 (void *) (aarch64_sys_regs_dc + i));
9537 
9538   for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9539     sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9540 			 aarch64_sys_regs_at[i].name,
9541 			 (void *) (aarch64_sys_regs_at + i));
9542 
9543   for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9544     sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9545 			 aarch64_sys_regs_tlbi[i].name,
9546 			 (void *) (aarch64_sys_regs_tlbi + i));
9547 
9548   for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9549     sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9550 			 aarch64_sys_regs_sr[i].name,
9551 			 (void *) (aarch64_sys_regs_sr + i));
9552 
9553   for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9554     checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9555 			 (void *) (reg_names + i));
9556 
9557   for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9558     checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9559 			 (void *) (nzcv_names + i));
9560 
9561   for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9562     {
9563       const char *name = aarch64_operand_modifiers[i].name;
9564       checked_hash_insert (aarch64_shift_hsh, name,
9565 			   (void *) (aarch64_operand_modifiers + i));
9566       /* Also hash the name in the upper case.  */
9567       checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9568 			   (void *) (aarch64_operand_modifiers + i));
9569     }
9570 
9571   for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9572     {
9573       unsigned int j;
9574       /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9575 	 the same condition code.  */
9576       for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9577 	{
9578 	  const char *name = aarch64_conds[i].names[j];
9579 	  if (name == NULL)
9580 	    break;
9581 	  checked_hash_insert (aarch64_cond_hsh, name,
9582 			       (void *) (aarch64_conds + i));
9583 	  /* Also hash the name in the upper case.  */
9584 	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9585 			       (void *) (aarch64_conds + i));
9586 	}
9587     }
9588 
9589   for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9590     {
9591       const char *name = aarch64_barrier_options[i].name;
9592       /* Skip xx00 - the unallocated values of option.  */
9593       if ((i & 0x3) == 0)
9594 	continue;
9595       checked_hash_insert (aarch64_barrier_opt_hsh, name,
9596 			   (void *) (aarch64_barrier_options + i));
9597       /* Also hash the name in the upper case.  */
9598       checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9599 			   (void *) (aarch64_barrier_options + i));
9600     }
9601 
9602   for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9603     {
9604       const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9605       checked_hash_insert (aarch64_barrier_opt_hsh, name,
9606 			   (void *) (aarch64_barrier_dsb_nxs_options + i));
9607       /* Also hash the name in the upper case.  */
9608       checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9609 			   (void *) (aarch64_barrier_dsb_nxs_options + i));
9610     }
9611 
9612   for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9613     {
9614       const char* name = aarch64_prfops[i].name;
9615       /* Skip the unallocated hint encodings.  */
9616       if (name == NULL)
9617 	continue;
9618       checked_hash_insert (aarch64_pldop_hsh, name,
9619 			   (void *) (aarch64_prfops + i));
9620       /* Also hash the name in the upper case.  */
9621       checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9622 			   (void *) (aarch64_prfops + i));
9623     }
9624 
9625   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9626     {
9627       const char* name = aarch64_hint_options[i].name;
9628       const char* upper_name = get_upper_str(name);
9629 
9630       checked_hash_insert (aarch64_hint_opt_hsh, name,
9631 			   (void *) (aarch64_hint_options + i));
9632 
9633       /* Also hash the name in the upper case if not the same.  */
9634       if (strcmp (name, upper_name) != 0)
9635 	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9636 			     (void *) (aarch64_hint_options + i));
9637     }
9638 
9639   /* Set the cpu variant based on the command-line options.  */
9640   if (!mcpu_cpu_opt)
9641     mcpu_cpu_opt = march_cpu_opt;
9642 
9643   if (!mcpu_cpu_opt)
9644     mcpu_cpu_opt = &cpu_default;
9645 
9646   cpu_variant = *mcpu_cpu_opt;
9647 
9648   /* Record the CPU type.  */
9649   mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
9650 
9651   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9652 }
9653 
9654 /* Command line processing.  */
9655 
9656 const char *md_shortopts = "m:";
9657 
9658 #ifdef AARCH64_BI_ENDIAN
9659 #define OPTION_EB (OPTION_MD_BASE + 0)
9660 #define OPTION_EL (OPTION_MD_BASE + 1)
9661 #else
9662 #if TARGET_BYTES_BIG_ENDIAN
9663 #define OPTION_EB (OPTION_MD_BASE + 0)
9664 #else
9665 #define OPTION_EL (OPTION_MD_BASE + 1)
9666 #endif
9667 #endif
9668 
9669 struct option md_longopts[] = {
9670 #ifdef OPTION_EB
9671   {"EB", no_argument, NULL, OPTION_EB},
9672 #endif
9673 #ifdef OPTION_EL
9674   {"EL", no_argument, NULL, OPTION_EL},
9675 #endif
9676   {NULL, no_argument, NULL, 0}
9677 };
9678 
9679 size_t md_longopts_size = sizeof (md_longopts);
9680 
9681 struct aarch64_option_table
9682 {
9683   const char *option;			/* Option name to match.  */
9684   const char *help;			/* Help information.  */
9685   int *var;			/* Variable to change.  */
9686   int value;			/* What to change it to.  */
9687   char *deprecated;		/* If non-null, print this message.  */
9688 };
9689 
9690 static struct aarch64_option_table aarch64_opts[] = {
9691   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
9692   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
9693    NULL},
9694 #ifdef DEBUG_AARCH64
9695   {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
9696 #endif /* DEBUG_AARCH64 */
9697   {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
9698    NULL},
9699   {"mno-verbose-error", N_("do not output verbose error messages"),
9700    &verbose_error_p, 0, NULL},
9701   {NULL, NULL, NULL, 0, NULL}
9702 };
9703 
9704 struct aarch64_cpu_option_table
9705 {
9706   const char *name;
9707   const aarch64_feature_set value;
9708   /* The canonical name of the CPU, or NULL to use NAME converted to upper
9709      case.  */
9710   const char *canonical_name;
9711 };
9712 
9713 /* This list should, at a minimum, contain all the cpu names
9714    recognized by GCC.  */
9715 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
9716   {"all", AARCH64_ANY, NULL},
9717   {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
9718 				  AARCH64_FEATURE_CRC), "Cortex-A34"},
9719   {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
9720 				  AARCH64_FEATURE_CRC), "Cortex-A35"},
9721   {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
9722 				  AARCH64_FEATURE_CRC), "Cortex-A53"},
9723   {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
9724 				  AARCH64_FEATURE_CRC), "Cortex-A57"},
9725   {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
9726 				  AARCH64_FEATURE_CRC), "Cortex-A72"},
9727   {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
9728 				  AARCH64_FEATURE_CRC), "Cortex-A73"},
9729   {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9730 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9731 				  "Cortex-A55"},
9732   {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9733 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9734 				  "Cortex-A75"},
9735   {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9736 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9737 				  "Cortex-A76"},
9738   {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9739 				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9740 				    | AARCH64_FEATURE_DOTPROD
9741 				    | AARCH64_FEATURE_SSBS),
9742 				    "Cortex-A76AE"},
9743   {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9744 				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9745 				  | AARCH64_FEATURE_DOTPROD
9746 				  | AARCH64_FEATURE_SSBS),
9747 				  "Cortex-A77"},
9748   {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9749 				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9750 				  | AARCH64_FEATURE_DOTPROD
9751 				  | AARCH64_FEATURE_SSBS),
9752 				  "Cortex-A65"},
9753   {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9754 				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9755 				    | AARCH64_FEATURE_DOTPROD
9756 				    | AARCH64_FEATURE_SSBS),
9757 				    "Cortex-A65AE"},
9758   {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9759                  AARCH64_FEATURE_F16
9760                  | AARCH64_FEATURE_RCPC
9761                  | AARCH64_FEATURE_DOTPROD
9762                  | AARCH64_FEATURE_SSBS
9763                  | AARCH64_FEATURE_PROFILE),
9764    "Cortex-A78"},
9765   {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9766                    AARCH64_FEATURE_F16
9767                    | AARCH64_FEATURE_RCPC
9768                    | AARCH64_FEATURE_DOTPROD
9769                    | AARCH64_FEATURE_SSBS
9770                    | AARCH64_FEATURE_PROFILE),
9771    "Cortex-A78AE"},
9772   {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9773                    AARCH64_FEATURE_DOTPROD
9774                    | AARCH64_FEATURE_F16
9775                    | AARCH64_FEATURE_FLAGM
9776                    | AARCH64_FEATURE_PAC
9777                    | AARCH64_FEATURE_PROFILE
9778                    | AARCH64_FEATURE_RCPC
9779                    | AARCH64_FEATURE_SSBS),
9780    "Cortex-A78C"},
9781   {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
9782                   AARCH64_FEATURE_BFLOAT16
9783                   | AARCH64_FEATURE_I8MM
9784                   | AARCH64_FEATURE_MEMTAG
9785                   | AARCH64_FEATURE_SVE2_BITPERM),
9786    "Cortex-A510"},
9787   {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
9788                   AARCH64_FEATURE_BFLOAT16
9789                   | AARCH64_FEATURE_I8MM
9790                   | AARCH64_FEATURE_MEMTAG
9791                   | AARCH64_FEATURE_SVE2_BITPERM),
9792    "Cortex-A710"},
9793   {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9794 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9795 				  | AARCH64_FEATURE_DOTPROD
9796 				  | AARCH64_FEATURE_PROFILE),
9797 				  "Ares"},
9798   {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9799 				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9800 				"Samsung Exynos M1"},
9801   {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9802 			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9803 			      | AARCH64_FEATURE_RDMA),
9804    "Qualcomm Falkor"},
9805   {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9806 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9807 				  | AARCH64_FEATURE_DOTPROD
9808 				  | AARCH64_FEATURE_SSBS),
9809 				  "Neoverse E1"},
9810   {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9811 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9812 				  | AARCH64_FEATURE_DOTPROD
9813 				  | AARCH64_FEATURE_PROFILE),
9814 				  "Neoverse N1"},
9815   {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9816 				   AARCH64_FEATURE_BFLOAT16
9817 				 | AARCH64_FEATURE_I8MM
9818 				 | AARCH64_FEATURE_F16
9819 				 | AARCH64_FEATURE_SVE
9820 				 | AARCH64_FEATURE_SVE2
9821 				 | AARCH64_FEATURE_SVE2_BITPERM
9822 				 | AARCH64_FEATURE_MEMTAG
9823 				 | AARCH64_FEATURE_RNG),
9824 				 "Neoverse N2"},
9825   {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9826 			    AARCH64_FEATURE_PROFILE
9827 			  | AARCH64_FEATURE_CVADP
9828 			  | AARCH64_FEATURE_SVE
9829 			  | AARCH64_FEATURE_SSBS
9830 			  | AARCH64_FEATURE_RNG
9831 			  | AARCH64_FEATURE_F16
9832 			  | AARCH64_FEATURE_BFLOAT16
9833 			  | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9834   {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9835 			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9836 			       | AARCH64_FEATURE_RDMA),
9837    "Qualcomm QDF24XX"},
9838   {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9839 			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9840    "Qualcomm Saphira"},
9841   {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9842 				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9843    "Cavium ThunderX"},
9844   {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9845 			      AARCH64_FEATURE_CRYPTO),
9846   "Broadcom Vulcan"},
9847   /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
9848      in earlier releases and is superseded by 'xgene1' in all
9849      tools.  */
9850   {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9851   {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9852   {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9853 			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9854   {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9855   {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9856                 AARCH64_FEATURE_F16
9857                 | AARCH64_FEATURE_RCPC
9858                 | AARCH64_FEATURE_DOTPROD
9859                 | AARCH64_FEATURE_SSBS
9860                 | AARCH64_FEATURE_PROFILE),
9861                 "Cortex-X1"},
9862   {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
9863                 AARCH64_FEATURE_BFLOAT16
9864                 | AARCH64_FEATURE_I8MM
9865                 | AARCH64_FEATURE_MEMTAG
9866                 | AARCH64_FEATURE_SVE2_BITPERM),
9867                 "Cortex-X2"},
9868   {"generic", AARCH64_ARCH_V8, NULL},
9869 
9870   {NULL, AARCH64_ARCH_NONE, NULL}
9871 };
9872 
9873 struct aarch64_arch_option_table
9874 {
9875   const char *name;
9876   const aarch64_feature_set value;
9877 };
9878 
9879 /* This list should, at a minimum, contain all the architecture names
9880    recognized by GCC.  */
9881 static const struct aarch64_arch_option_table aarch64_archs[] = {
9882   {"all", AARCH64_ANY},
9883   {"armv8-a", AARCH64_ARCH_V8},
9884   {"armv8.1-a", AARCH64_ARCH_V8_1},
9885   {"armv8.2-a", AARCH64_ARCH_V8_2},
9886   {"armv8.3-a", AARCH64_ARCH_V8_3},
9887   {"armv8.4-a", AARCH64_ARCH_V8_4},
9888   {"armv8.5-a", AARCH64_ARCH_V8_5},
9889   {"armv8.6-a", AARCH64_ARCH_V8_6},
9890   {"armv8.7-a", AARCH64_ARCH_V8_7},
9891   {"armv8.8-a", AARCH64_ARCH_V8_8},
9892   {"armv8-r",	AARCH64_ARCH_V8_R},
9893   {"armv9-a",	AARCH64_ARCH_V9},
9894   {"armv9.1-a",	AARCH64_ARCH_V9_1},
9895   {"armv9.2-a",	AARCH64_ARCH_V9_2},
9896   {"armv9.3-a",	AARCH64_ARCH_V9_3},
9897   {NULL, AARCH64_ARCH_NONE}
9898 };
9899 
9900 /* ISA extensions.  */
9901 struct aarch64_option_cpu_value_table
9902 {
9903   const char *name;
9904   const aarch64_feature_set value;
9905   const aarch64_feature_set require; /* Feature dependencies.  */
9906 };
9907 
9908 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9909   {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9910 			AARCH64_ARCH_NONE},
9911   {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9912 			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9913   {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9914 			AARCH64_ARCH_NONE},
9915   {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9916 			AARCH64_ARCH_NONE},
9917   {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9918 			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9919   {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9920 			AARCH64_ARCH_NONE},
9921   {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9922 			AARCH64_ARCH_NONE},
9923   {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9924 			AARCH64_ARCH_NONE},
9925   {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9926 			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9927   {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9928 			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9929   {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9930 			AARCH64_FEATURE (AARCH64_FEATURE_FP
9931 					 | AARCH64_FEATURE_F16, 0)},
9932   {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9933 			AARCH64_ARCH_NONE},
9934   {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9935 			AARCH64_FEATURE (AARCH64_FEATURE_F16
9936 					 | AARCH64_FEATURE_SIMD
9937 					 | AARCH64_FEATURE_COMPNUM, 0)},
9938   {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9939 			AARCH64_ARCH_NONE},
9940   {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9941 			AARCH64_FEATURE (AARCH64_FEATURE_F16
9942 					 | AARCH64_FEATURE_SIMD, 0)},
9943   {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9944 			AARCH64_ARCH_NONE},
9945   {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9946 			AARCH64_ARCH_NONE},
9947   {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9948 			AARCH64_ARCH_NONE},
9949   {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9950 			AARCH64_ARCH_NONE},
9951   {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9952 			AARCH64_ARCH_NONE},
9953   {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9954 			AARCH64_ARCH_NONE},
9955   {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9956 			AARCH64_ARCH_NONE},
9957   {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9958 			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9959   {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9960 			AARCH64_ARCH_NONE},
9961   {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9962 			AARCH64_ARCH_NONE},
9963   {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9964 			AARCH64_ARCH_NONE},
9965   {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9966 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9967   {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9968 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9969 					 | AARCH64_FEATURE_SM4, 0)},
9970   {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9971 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9972 					 | AARCH64_FEATURE_AES, 0)},
9973   {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9974 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9975 					 | AARCH64_FEATURE_SHA3, 0)},
9976   {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9977 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9978   {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
9979 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9980 					 | AARCH64_FEATURE_BFLOAT16, 0)},
9981   {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
9982 			AARCH64_FEATURE (AARCH64_FEATURE_SME
9983 					 | AARCH64_FEATURE_SVE2
9984 					 | AARCH64_FEATURE_BFLOAT16, 0)},
9985   {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
9986 			AARCH64_FEATURE (AARCH64_FEATURE_SME
9987 					 | AARCH64_FEATURE_SVE2
9988 					 | AARCH64_FEATURE_BFLOAT16, 0)},
9989   {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9990 			AARCH64_ARCH_NONE},
9991   {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9992 			AARCH64_ARCH_NONE},
9993   {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9994 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9995   {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9996 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9997   {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
9998 			AARCH64_ARCH_NONE},
9999   {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
10000 			AARCH64_ARCH_NONE},
10001   {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
10002 			AARCH64_ARCH_NONE},
10003   {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
10004 			AARCH64_ARCH_NONE},
10005   {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
10006 			AARCH64_ARCH_NONE},
10007   {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
10008 };
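/* For example (per the entries above), enabling "+sve2-aes" also pulls in
   sve2 and aes, and transitively everything those require, while disabling
   "+nofp" also disables simd, fp16 and every other feature that requires
   fp.  The closures are computed by aarch64_feature_enable_set and
   aarch64_feature_disable_set below.  */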
10009 
10010 struct aarch64_long_option_table
10011 {
10012   const char *option;			/* Substring to match.  */
10013   const char *help;			/* Help information.  */
10014   int (*func) (const char *subopt);	/* Function to decode sub-option.  */
10015   char *deprecated;		/* If non-null, print this message.  */
10016 };
10017 
10018 /* Transitive closure of the features that depend on SET, i.e. everything
         that must also be disabled when SET is disabled.  */
10019 static aarch64_feature_set
10020 aarch64_feature_disable_set (aarch64_feature_set set)
10021 {
10022   const struct aarch64_option_cpu_value_table *opt;
10023   aarch64_feature_set prev = 0;
10024 
10025   while (prev != set) {
10026     prev = set;
10027     for (opt = aarch64_features; opt->name != NULL; opt++)
10028       if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10029         AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10030   }
10031   return set;
10032 }
10033 
10034 /* Transitive closure of the dependencies of SET, i.e. everything that
         must also be enabled when SET is enabled.  */
10035 static aarch64_feature_set
10036 aarch64_feature_enable_set (aarch64_feature_set set)
10037 {
10038   const struct aarch64_option_cpu_value_table *opt;
10039   aarch64_feature_set prev = 0;
10040 
10041   while (prev != set) {
10042     prev = set;
10043     for (opt = aarch64_features; opt->name != NULL; opt++)
10044       if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10045         AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10046   }
10047   return set;
10048 }
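/* A minimal standalone sketch of the two closure loops above.  It uses a
   plain unsigned mask in place of aarch64_feature_set and a hypothetical
   three-feature table; the names are illustrative only.  */

#include <stdio.h>

#define F_FP    (1u << 0)
#define F_SIMD  (1u << 1)	/* requires FP */
#define F_SVE   (1u << 2)	/* requires SIMD */

struct toy_feature { unsigned value, require; };

static const struct toy_feature toy_features[] = {
  { F_SIMD, F_FP },
  { F_SVE,  F_SIMD },
  { 0, 0 }
};

/* Everything that must also be disabled when SET is disabled.  */
static unsigned
toy_disable_set (unsigned set)
{
  unsigned prev = 0;
  while (prev != set)
    {
      const struct toy_feature *o;
      prev = set;
      for (o = toy_features; o->value != 0; o++)
	if (o->require & set)
	  set |= o->value;
    }
  return set;
}

/* Everything that must also be enabled when SET is enabled.  */
static unsigned
toy_enable_set (unsigned set)
{
  unsigned prev = 0;
  while (prev != set)
    {
      const struct toy_feature *o;
      prev = set;
      for (o = toy_features; o->value != 0; o++)
	if (set & o->value)
	  set |= o->require;
    }
  return set;
}

int
main (void)
{
  /* Disabling fp also disables simd and, transitively, sve.  */
  printf ("disable fp -> %#x\n", toy_disable_set (F_FP));
  /* Enabling sve also enables simd and, transitively, fp.  */
  printf ("enable sve -> %#x\n", toy_enable_set (F_SVE));
  return 0;
}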
10049 
10050 static int
10051 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10052 			bool ext_only)
10053 {
10054   /* We insist on extensions being added before being removed.  We achieve
10055      this by using the ADDING_VALUE variable to indicate whether we are
10056      adding an extension (1) or removing it (0) and only allowing it to
10057      change in the order -1 -> 1 -> 0.  */
10058   int adding_value = -1;
10059   aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10060 
10061   /* Copy the feature set, so that we can modify it.  */
10062   *ext_set = **opt_p;
10063   *opt_p = ext_set;
10064 
10065   while (str != NULL && *str != 0)
10066     {
10067       const struct aarch64_option_cpu_value_table *opt;
10068       const char *ext = NULL;
10069       int optlen;
10070 
10071       if (!ext_only)
10072 	{
10073 	  if (*str != '+')
10074 	    {
10075 	      as_bad (_("invalid architectural extension"));
10076 	      return 0;
10077 	    }
10078 
10079 	  ext = strchr (++str, '+');
10080 	}
10081 
10082       if (ext != NULL)
10083 	optlen = ext - str;
10084       else
10085 	optlen = strlen (str);
10086 
10087       if (optlen >= 2 && startswith (str, "no"))
10088 	{
10089 	  if (adding_value != 0)
10090 	    adding_value = 0;
10091 	  optlen -= 2;
10092 	  str += 2;
10093 	}
10094       else if (optlen > 0)
10095 	{
10096 	  if (adding_value == -1)
10097 	    adding_value = 1;
10098 	  else if (adding_value != 1)
10099 	    {
10100 	      as_bad (_("must specify extensions to add before specifying "
10101 			"those to remove"));
10102 	      return 0;
10103 	    }
10104 	}
10105 
10106       if (optlen == 0)
10107 	{
10108 	  as_bad (_("missing architectural extension"));
10109 	  return 0;
10110 	}
10111 
10112       gas_assert (adding_value != -1);
10113 
10114       for (opt = aarch64_features; opt->name != NULL; opt++)
10115 	if (strncmp (opt->name, str, optlen) == 0)
10116 	  {
10117 	    aarch64_feature_set set;
10118 
10119 	    /* Add or remove the extension.  */
10120 	    if (adding_value)
10121 	      {
10122 		set = aarch64_feature_enable_set (opt->value);
10123 		AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10124 	      }
10125 	    else
10126 	      {
10127 		set = aarch64_feature_disable_set (opt->value);
10128 		AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10129 	      }
10130 	    break;
10131 	  }
10132 
10133       if (opt->name == NULL)
10134 	{
10135 	  as_bad (_("unknown architectural extension `%s'"), str);
10136 	  return 0;
10137 	}
10138 
10139       str = ext;
10140     }
10141 
10142   return 1;
10143 }
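/* For example (illustrative), the ordering rule enforced above means that
   "+sve+nofp16" is accepted: sve and its dependencies are enabled first,
   then fp16 and everything that depends on it are disabled.  By contrast,
   "+nofp16+sve" is rejected, because additions may not follow removals.  */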
10144 
10145 static int
10146 aarch64_parse_cpu (const char *str)
10147 {
10148   const struct aarch64_cpu_option_table *opt;
10149   const char *ext = strchr (str, '+');
10150   size_t optlen;
10151 
10152   if (ext != NULL)
10153     optlen = ext - str;
10154   else
10155     optlen = strlen (str);
10156 
10157   if (optlen == 0)
10158     {
10159       as_bad (_("missing cpu name `%s'"), str);
10160       return 0;
10161     }
10162 
10163   for (opt = aarch64_cpus; opt->name != NULL; opt++)
10164     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10165       {
10166 	mcpu_cpu_opt = &opt->value;
10167 	if (ext != NULL)
10168 	  return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10169 
10170 	return 1;
10171       }
10172 
10173   as_bad (_("unknown cpu `%s'"), str);
10174   return 0;
10175 }
10176 
10177 static int
10178 aarch64_parse_arch (const char *str)
10179 {
10180   const struct aarch64_arch_option_table *opt;
10181   const char *ext = strchr (str, '+');
10182   size_t optlen;
10183 
10184   if (ext != NULL)
10185     optlen = ext - str;
10186   else
10187     optlen = strlen (str);
10188 
10189   if (optlen == 0)
10190     {
10191       as_bad (_("missing architecture name `%s'"), str);
10192       return 0;
10193     }
10194 
10195   for (opt = aarch64_archs; opt->name != NULL; opt++)
10196     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10197       {
10198 	march_cpu_opt = &opt->value;
10199 	if (ext != NULL)
10200 	  return aarch64_parse_features (ext, &march_cpu_opt, false);
10201 
10202 	return 1;
10203       }
10204 
10205   as_bad (_("unknown architecture `%s'\n"), str);
10206   return 0;
10207 }
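/* A standalone sketch of the name/extension split shared by
   aarch64_parse_cpu and aarch64_parse_arch, for a hypothetical
   "-march=armv8.2-a+sve+nofp16" command line.  */

#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *str = "armv8.2-a+sve+nofp16";
  const char *ext = strchr (str, '+');
  size_t optlen = ext != NULL ? (size_t) (ext - str) : strlen (str);

  /* The name part is looked up with a length-checked strncmp against the
     option table; the "+..." tail goes to aarch64_parse_features.  */
  printf ("name: %.*s\n", (int) optlen, str);
  printf ("extensions: %s\n", ext != NULL ? ext : "(none)");
  return 0;
}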
10208 
10209 /* ABIs.  */
10210 struct aarch64_option_abi_value_table
10211 {
10212   const char *name;
10213   enum aarch64_abi_type value;
10214 };
10215 
10216 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
10217   {"ilp32",		AARCH64_ABI_ILP32},
10218   {"lp64",		AARCH64_ABI_LP64},
10219 };
10220 
10221 static int
10222 aarch64_parse_abi (const char *str)
10223 {
10224   unsigned int i;
10225 
10226   if (str[0] == '\0')
10227     {
10228       as_bad (_("missing abi name `%s'"), str);
10229       return 0;
10230     }
10231 
10232   for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10233     if (strcmp (str, aarch64_abis[i].name) == 0)
10234       {
10235 	aarch64_abi = aarch64_abis[i].value;
10236 	return 1;
10237       }
10238 
10239   as_bad (_("unknown abi `%s'\n"), str);
10240   return 0;
10241 }
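/* For example, "-mabi=ilp32" selects AARCH64_ABI_ILP32 and "-mabi=lp64"
   selects AARCH64_ABI_LP64; any other name falls through to the
   "unknown abi" diagnostic, since only those two entries exist in
   aarch64_abis.  */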
10242 
10243 static struct aarch64_long_option_table aarch64_long_opts[] = {
10244 #ifdef OBJ_ELF
10245   {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
10246    aarch64_parse_abi, NULL},
10247 #endif /* OBJ_ELF */
10248   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
10249    aarch64_parse_cpu, NULL},
10250   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
10251    aarch64_parse_arch, NULL},
10252   {NULL, NULL, 0, NULL}
10253 };
10254 
10255 int
10256 md_parse_option (int c, const char *arg)
10257 {
10258   struct aarch64_option_table *opt;
10259   struct aarch64_long_option_table *lopt;
10260 
10261   switch (c)
10262     {
10263 #ifdef OPTION_EB
10264     case OPTION_EB:
10265       target_big_endian = 1;
10266       break;
10267 #endif
10268 
10269 #ifdef OPTION_EL
10270     case OPTION_EL:
10271       target_big_endian = 0;
10272       break;
10273 #endif
10274 
10275     case 'a':
10276       /* Listing option.  Just ignore these, we don't support additional
10277          ones.  */
10278       return 0;
10279 
10280     default:
10281       for (opt = aarch64_opts; opt->option != NULL; opt++)
10282 	{
10283 	  if (c == opt->option[0]
10284 	      && ((arg == NULL && opt->option[1] == 0)
10285 		  || streq (arg, opt->option + 1)))
10286 	    {
10287 	      /* If the option is deprecated, tell the user.  */
10288 	      if (opt->deprecated != NULL)
10289 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10290 			   arg ? arg : "", _(opt->deprecated));
10291 
10292 	      if (opt->var != NULL)
10293 		*opt->var = opt->value;
10294 
10295 	      return 1;
10296 	    }
10297 	}
10298 
10299       for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10300 	{
10301 	  /* These options are expected to have an argument.  */
10302 	  if (c == lopt->option[0]
10303 	      && arg != NULL
10304 	      && startswith (arg, lopt->option + 1))
10305 	    {
10306 	      /* If the option is deprecated, tell the user.  */
10307 	      if (lopt->deprecated != NULL)
10308 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10309 			   _(lopt->deprecated));
10310 
10311 	      /* Call the sub-option parser.  */
10312 	      return lopt->func (arg + strlen (lopt->option) - 1);
10313 	    }
10314 	}
10315 
10316       return 0;
10317     }
10318 
10319   return 1;
10320 }
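/* A standalone sketch of the long-option dispatch above for a hypothetical
   "-mcpu=cortex-a53": gas passes C = 'm' and ARG = "cpu=cortex-a53", and
   the table entry "mcpu=" claims it.  strncmp stands in for startswith.  */

#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *option = "mcpu=";		/* lopt->option */
  const char *arg = "cpu=cortex-a53";	/* ARG as passed to md_parse_option */

  /* option[0] already matched C; compare the rest of the option name.  */
  if (strncmp (arg, option + 1, strlen (option + 1)) == 0)
    /* Skip "cpu=" (strlen (option) - 1 bytes) to reach the sub-option,
       which is what gets handed to aarch64_parse_cpu.  */
    printf ("sub-option: %s\n", arg + strlen (option) - 1);
  return 0;
}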
10321 
10322 void
10323 md_show_usage (FILE * fp)
10324 {
10325   struct aarch64_option_table *opt;
10326   struct aarch64_long_option_table *lopt;
10327 
10328   fprintf (fp, _(" AArch64-specific assembler options:\n"));
10329 
10330   for (opt = aarch64_opts; opt->option != NULL; opt++)
10331     if (opt->help != NULL)
10332       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
10333 
10334   for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10335     if (lopt->help != NULL)
10336       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
10337 
10338 #ifdef OPTION_EB
10339   fprintf (fp, _("\
10340   -EB                     assemble code for a big-endian cpu\n"));
10341 #endif
10342 
10343 #ifdef OPTION_EL
10344   fprintf (fp, _("\
10345   -EL                     assemble code for a little-endian cpu\n"));
10346 #endif
10347 }
10348 
10349 /* Parse a .cpu directive.  */
10350 
10351 static void
10352 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10353 {
10354   const struct aarch64_cpu_option_table *opt;
10355   char saved_char;
10356   char *name;
10357   char *ext;
10358   size_t optlen;
10359 
10360   name = input_line_pointer;
10361   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10362     input_line_pointer++;
10363   saved_char = *input_line_pointer;
10364   *input_line_pointer = 0;
10365 
10366   ext = strchr (name, '+');
10367 
10368   if (ext != NULL)
10369     optlen = ext - name;
10370   else
10371     optlen = strlen (name);
10372 
10373   /* Skip the first "all" entry.  */
10374   for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10375     if (strlen (opt->name) == optlen
10376 	&& strncmp (name, opt->name, optlen) == 0)
10377       {
10378 	mcpu_cpu_opt = &opt->value;
10379 	if (ext != NULL)
10380 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10381 	    return;
10382 
10383 	cpu_variant = *mcpu_cpu_opt;
10384 
10385 	*input_line_pointer = saved_char;
10386 	demand_empty_rest_of_line ();
10387 	return;
10388       }
10389   as_bad (_("unknown cpu `%s'"), name);
10390   *input_line_pointer = saved_char;
10391   ignore_rest_of_line ();
10392 }
10393 
10394 
10395 /* Parse a .arch directive.  */
10396 
10397 static void
10398 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10399 {
10400   const struct aarch64_arch_option_table *opt;
10401   char saved_char;
10402   char *name;
10403   char *ext;
10404   size_t optlen;
10405 
10406   name = input_line_pointer;
10407   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10408     input_line_pointer++;
10409   saved_char = *input_line_pointer;
10410   *input_line_pointer = 0;
10411 
10412   ext = strchr (name, '+');
10413 
10414   if (ext != NULL)
10415     optlen = ext - name;
10416   else
10417     optlen = strlen (name);
10418 
10419   /* Skip the first "all" entry.  */
10420   for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10421     if (strlen (opt->name) == optlen
10422 	&& strncmp (name, opt->name, optlen) == 0)
10423       {
10424 	mcpu_cpu_opt = &opt->value;
10425 	if (ext != NULL)
10426 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10427 	    return;
10428 
10429 	cpu_variant = *mcpu_cpu_opt;
10430 
10431 	*input_line_pointer = saved_char;
10432 	demand_empty_rest_of_line ();
10433 	return;
10434       }
10435 
10436   as_bad (_("unknown architecture `%s'\n"), name);
10437   *input_line_pointer = saved_char;
10438   ignore_rest_of_line ();
10439 }
10440 
10441 /* Parse a .arch_extension directive.  */
10442 
10443 static void
10444 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10445 {
10446   char saved_char;
10447   char *ext = input_line_pointer;
10448 
10449   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10450     input_line_pointer++;
10451   saved_char = *input_line_pointer;
10452   *input_line_pointer = 0;
10453 
10454   if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10455     return;
10456 
10457   cpu_variant = *mcpu_cpu_opt;
10458 
10459   *input_line_pointer = saved_char;
10460   demand_empty_rest_of_line ();
10461 }
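/* For example (illustrative), the directives parsed above let a source
   file narrow or widen the accepted instruction set mid-stream:

	.arch armv8-a
	.arch_extension memtag
	// ... MTE instructions accepted here ...
	.arch_extension nomemtag

   Each directive re-runs the feature parsing and updates cpu_variant.  */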
10462 
10463 /* Copy symbol information.  */
10464 
10465 void
10466 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
10467 {
10468   AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
10469 }
10470 
10471 #ifdef OBJ_ELF
10472 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10473    This is needed so AArch64 specific st_other values can be independently
10474    specified for an IFUNC resolver (that is called by the dynamic linker)
10475    and the symbol it resolves (aliased to the resolver).  In particular,
10476    if a function symbol has special st_other value set via directives,
10477    then attaching an IFUNC resolver to that symbol should not override
10478    the st_other setting.  Requiring the directive on the IFUNC resolver
10479    symbol would be unexpected and problematic in C code, where the two
10480    symbols appear as two independent function declarations.  */
10481 
10482 void
10483 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10484 {
10485   struct elf_obj_sy *srcelf = symbol_get_obj (src);
10486   struct elf_obj_sy *destelf = symbol_get_obj (dest);
10487   /* If size is unset, copy size from src.  Because we don't track whether
10488      .size has been used, we can't differentiate .size dest, 0 from the case
10489      where dest's size is unset.  */
10490   if (!destelf->size && S_GET_SIZE (dest) == 0)
10491     {
10492       if (srcelf->size)
10493 	{
10494 	  destelf->size = XNEW (expressionS);
10495 	  *destelf->size = *srcelf->size;
10496 	}
10497       S_SET_SIZE (dest, S_GET_SIZE (src));
10498     }
10499 }
10500 #endif
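/* For example (illustrative), st_other markings applied via directives,
   such as

	.variant_pcs	foo
	.type		foo, %gnu_indirect_function
	.set		foo, foo_resolver

   stay on the symbol they were applied to: because st_other is not copied
   here, aliasing the IFUNC resolver and the symbol it resolves does not
   propagate or clobber such markings between the two.  */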
10501