xref: /netbsd-src/external/gpl3/binutils.old/dist/gas/config/tc-aarch64.c (revision 04028aa9310ca9c619eca5cf58ddf1e58624d1d7)
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2 
3    Copyright 2009, 2010, 2011, 2012, 2013
4    Free Software Foundation, Inc.
5    Contributed by ARM Ltd.
6 
7    This file is part of GAS.
8 
9    GAS is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the license, or
12    (at your option) any later version.
13 
14    GAS is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; see the file COPYING3. If not,
21    see <http://www.gnu.org/licenses/>.  */
22 
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define	 NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31 
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36 
37 #include "dwarf2dbg.h"
38 
39 /* Types of processor to assemble for.  */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43 
44 #define streq(a, b)	      (strcmp (a, b) == 0)
45 
46 static aarch64_feature_set cpu_variant;
47 
48 /* Variables that we set while parsing command-line options.  Once all
49    options have been read we re-process these values to set the real
50    assembly flags.  */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53 
54 /* Constants for known architecture features.  */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56 
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59 
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
62 static symbolS *GOT_symbol;
63 #endif
64 
65 enum neon_el_type
66 {
67   NT_invtype = -1,
68   NT_b,
69   NT_h,
70   NT_s,
71   NT_d,
72   NT_q
73 };
74 
75 /* Bits for DEFINED field in neon_type_el.  */
76 #define NTA_HASTYPE  1
77 #define NTA_HASINDEX 2
78 
79 struct neon_type_el
80 {
81   enum neon_el_type type;
82   unsigned char defined;
83   unsigned width;
84   int64_t index;
85 };
86 
87 #define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001
88 
89 struct reloc
90 {
91   bfd_reloc_code_real_type type;
92   expressionS exp;
93   int pc_rel;
94   enum aarch64_opnd opnd;
95   uint32_t flags;
96   unsigned need_libopcodes_p : 1;
97 };
98 
99 struct aarch64_instruction
100 {
101   /* libopcodes structure for instruction intermediate representation.  */
102   aarch64_inst base;
103   /* Record assembly errors found during the parsing.  */
104   struct
105     {
106       enum aarch64_operand_error_kind kind;
107       const char *error;
108     } parsing_error;
109   /* The condition that appears in the assembly line.  */
110   int cond;
111   /* Relocation information (including the GAS internal fixup).  */
112   struct reloc reloc;
113   /* Need to generate an immediate in the literal pool.  */
114   unsigned gen_lit_pool : 1;
115 };
116 
117 typedef struct aarch64_instruction aarch64_instruction;
118 
119 static aarch64_instruction inst;
120 
121 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
122 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
123 
124 /* Diagnostics inline function utilities.
125 
126    These are lightweight utilities which should only be called by parse_operands
127    and other parsers.  GAS processes each assembly line by parsing it against
128    instruction template(s); in the case of multiple templates (for the same
129    mnemonic name), those templates are tried one by one until one succeeds or
130    all fail.  An assembly line may fail a few templates before being
131    successfully parsed; an error saved here in most cases is not a user error
132    but an error indicating the current template is not the right template.
133    Therefore it is very important that errors can be saved at a low cost during
134    the parsing; we don't want to slow down the whole parsing by recording
135    non-user errors in detail.
136 
137    Remember that the objective is to help GAS pick up the most appropriate
138    error message in the case of multiple templates, e.g. FMOV which has 8
139    templates.  */
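 
/* Note that the helpers below deliberately keep at most one pending error:
   the set_first_* and first_error* variants only record an error when none
   has been recorded yet for the current assembly line.  */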
140 
141 static inline void
142 clear_error (void)
143 {
144   inst.parsing_error.kind = AARCH64_OPDE_NIL;
145   inst.parsing_error.error = NULL;
146 }
147 
148 static inline bfd_boolean
149 error_p (void)
150 {
151   return inst.parsing_error.kind != AARCH64_OPDE_NIL;
152 }
153 
154 static inline const char *
155 get_error_message (void)
156 {
157   return inst.parsing_error.error;
158 }
159 
160 static inline void
161 set_error_message (const char *error)
162 {
163   inst.parsing_error.error = error;
164 }
165 
166 static inline enum aarch64_operand_error_kind
167 get_error_kind (void)
168 {
169   return inst.parsing_error.kind;
170 }
171 
172 static inline void
173 set_error_kind (enum aarch64_operand_error_kind kind)
174 {
175   inst.parsing_error.kind = kind;
176 }
177 
178 static inline void
179 set_error (enum aarch64_operand_error_kind kind, const char *error)
180 {
181   inst.parsing_error.kind = kind;
182   inst.parsing_error.error = error;
183 }
184 
185 static inline void
186 set_recoverable_error (const char *error)
187 {
188   set_error (AARCH64_OPDE_RECOVERABLE, error);
189 }
190 
191 /* Use the DESC field of the corresponding aarch64_operand entry to compose
192    the error message.  */
193 static inline void
194 set_default_error (void)
195 {
196   set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
197 }
198 
199 static inline void
200 set_syntax_error (const char *error)
201 {
202   set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
203 }
204 
205 static inline void
206 set_first_syntax_error (const char *error)
207 {
208   if (! error_p ())
209     set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
210 }
211 
212 static inline void
213 set_fatal_syntax_error (const char *error)
214 {
215   set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
216 }
217 
218 /* Number of littlenums required to hold an extended precision number.  */
219 #define MAX_LITTLENUMS 6
220 
221 /* Return value for certain parsers when the parsing fails; those parsers
222    return information about the parsed result, e.g. the register number, on
223    success.  */
224 #define PARSE_FAIL -1
225 
226 /* This is an invalid condition code that means no conditional field is
227    present. */
228 #define COND_ALWAYS 0x10
229 
230 typedef struct
231 {
232   const char *template;
233   unsigned long value;
234 } asm_barrier_opt;
235 
236 typedef struct
237 {
238   const char *template;
239   uint32_t value;
240 } asm_nzcv;
241 
242 struct reloc_entry
243 {
244   char *name;
245   bfd_reloc_code_real_type reloc;
246 };
247 
248 /* Structure for a hash table entry for a register.  */
249 typedef struct
250 {
251   const char *name;
252   unsigned char number;
253   unsigned char type;
254   unsigned char builtin;
255 } reg_entry;
256 
257 /* Macros to define the register types and masks for the purpose
258    of parsing.  */
259 
260 #undef AARCH64_REG_TYPES
261 #define AARCH64_REG_TYPES	\
262   BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
263   BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
264   BASIC_REG_TYPE(SP_32)	/* wsp     */	\
265   BASIC_REG_TYPE(SP_64)	/* sp      */	\
266   BASIC_REG_TYPE(Z_32)	/* wzr     */	\
267   BASIC_REG_TYPE(Z_64)	/* xzr     */	\
268   BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
269   BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
270   BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
271   BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
272   BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
273   BASIC_REG_TYPE(CN)	/* c[0-7]  */	\
274   BASIC_REG_TYPE(VN)	/* v[0-31] */	\
275   /* Typecheck: any 64-bit int reg         (inc SP exc XZR) */		\
276   MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
277   /* Typecheck: any int                    (inc {W}SP inc [WX]ZR) */	\
278   MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
279 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
280 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) 			\
281   /* Typecheck: any [BHSDQ]P FP.  */					\
282   MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
283 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
284   /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR)  */	\
285   MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
286 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
287 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
288 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
289   /* Any integer register; used for error messages only.  */		\
290   MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
291 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
292 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
293   /* Pseudo type to mark the end of the enumerator sequence.  */	\
294   BASIC_REG_TYPE(MAX)
295 
296 #undef BASIC_REG_TYPE
297 #define BASIC_REG_TYPE(T)	REG_TYPE_##T,
298 #undef MULTI_REG_TYPE
299 #define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)
300 
301 /* Register type enumerators.  */
302 typedef enum
303 {
304   /* A list of REG_TYPE_*.  */
305   AARCH64_REG_TYPES
306 } aarch64_reg_type;
307 
308 #undef BASIC_REG_TYPE
309 #define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
310 #undef REG_TYPE
311 #define REG_TYPE(T)		(1 << REG_TYPE_##T)
312 #undef MULTI_REG_TYPE
313 #define MULTI_REG_TYPE(T,V)	V,
314 
315 /* Values indexed by aarch64_reg_type to assist the type checking.  */
316 static const unsigned reg_type_masks[] =
317 {
318   AARCH64_REG_TYPES
319 };
320 
321 #undef BASIC_REG_TYPE
322 #undef REG_TYPE
323 #undef MULTI_REG_TYPE
324 #undef AARCH64_REG_TYPES
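 
/* As an illustration of the X-macro expansion above: BASIC_REG_TYPE(R_64)
   contributes the enumerator REG_TYPE_R_64 and the mask (1 << REG_TYPE_R_64),
   while MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) contributes
   the enumerator REG_TYPE_R64_SP whose mask is the OR of the R_64 and SP_64
   bits, so multi-types can be checked as supersets of the basic types.  */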
325 
326 /* Diagnostics used when we don't get a register of the expected type.
327    Note:  this has to be synchronized with aarch64_reg_type definitions
328    above.  */
329 static const char *
330 get_reg_expected_msg (aarch64_reg_type reg_type)
331 {
332   const char *msg;
333 
334   switch (reg_type)
335     {
336     case REG_TYPE_R_32:
337       msg = N_("integer 32-bit register expected");
338       break;
339     case REG_TYPE_R_64:
340       msg = N_("integer 64-bit register expected");
341       break;
342     case REG_TYPE_R_N:
343       msg = N_("integer register expected");
344       break;
345     case REG_TYPE_R_Z_SP:
346       msg = N_("integer, zero or SP register expected");
347       break;
348     case REG_TYPE_FP_B:
349       msg = N_("8-bit SIMD scalar register expected");
350       break;
351     case REG_TYPE_FP_H:
352       msg = N_("16-bit SIMD scalar or floating-point half precision "
353 	       "register expected");
354       break;
355     case REG_TYPE_FP_S:
356       msg = N_("32-bit SIMD scalar or floating-point single precision "
357 	       "register expected");
358       break;
359     case REG_TYPE_FP_D:
360       msg = N_("64-bit SIMD scalar or floating-point double precision "
361 	       "register expected");
362       break;
363     case REG_TYPE_FP_Q:
364       msg = N_("128-bit SIMD scalar or floating-point quad precision "
365 	       "register expected");
366       break;
367     case REG_TYPE_CN:
368       msg = N_("C0 - C15 expected");
369       break;
370     case REG_TYPE_R_Z_BHSDQ_V:
371       msg = N_("register expected");
372       break;
373     case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
374       msg = N_("SIMD scalar or floating-point register expected");
375       break;
376     case REG_TYPE_VN:		/* any V reg  */
377       msg = N_("vector register expected");
378       break;
379     default:
380       as_fatal (_("invalid register type %d"), reg_type);
381     }
382   return msg;
383 }
384 
385 /* Some well known registers that we refer to directly elsewhere.  */
386 #define REG_SP	31
387 
388 /* Instructions take 4 bytes in the object file.  */
389 #define INSN_SIZE	4
390 
391 /* Define some common error messages.  */
392 #define BAD_SP          _("SP not allowed here")
393 
394 static struct hash_control *aarch64_ops_hsh;
395 static struct hash_control *aarch64_cond_hsh;
396 static struct hash_control *aarch64_shift_hsh;
397 static struct hash_control *aarch64_sys_regs_hsh;
398 static struct hash_control *aarch64_pstatefield_hsh;
399 static struct hash_control *aarch64_sys_regs_ic_hsh;
400 static struct hash_control *aarch64_sys_regs_dc_hsh;
401 static struct hash_control *aarch64_sys_regs_at_hsh;
402 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
403 static struct hash_control *aarch64_reg_hsh;
404 static struct hash_control *aarch64_barrier_opt_hsh;
405 static struct hash_control *aarch64_nzcv_hsh;
406 static struct hash_control *aarch64_pldop_hsh;
407 
408 /* Stuff needed to resolve the label ambiguity
409    As:
410      ...
411      label:   <insn>
412    may differ from:
413      ...
414      label:
415 	      <insn>  */
416 
417 static symbolS *last_label_seen;
418 
419 /* Literal pool structure.  Held on a per-section
420    and per-sub-section basis.  */
421 
422 #define MAX_LITERAL_POOL_SIZE 1024
423 typedef struct literal_pool
424 {
425   expressionS literals[MAX_LITERAL_POOL_SIZE];
426   unsigned int next_free_entry;
427   unsigned int id;
428   symbolS *symbol;
429   segT section;
430   subsegT sub_section;
431   int size;
432   struct literal_pool *next;
433 } literal_pool;
434 
435 /* Pointer to a linked list of literal pools.  */
436 static literal_pool *list_of_pools = NULL;
437 
438 /* Pure syntax.	 */
439 
440 /* This array holds the chars that always start a comment.  If the
441    pre-processor is disabled, these aren't very useful.	 */
442 const char comment_chars[] = "";
443 
444 /* This array holds the chars that only start a comment at the beginning of
445    a line.  If the line seems to have the form '# 123 filename'
446    .line and .file directives will appear in the pre-processed output.	*/
447 /* Note that input_file.c hand checks for '#' at the beginning of the
448    first line of the input file.  This is because the compiler outputs
449    #NO_APP at the beginning of its output.  */
450 /* Also note that comments like this one will always work.  */
451 const char line_comment_chars[] = "#";
452 
453 const char line_separator_chars[] = ";";
454 
455 /* Chars that can be used to separate mant
456    from exp in floating point numbers.	*/
457 const char EXP_CHARS[] = "eE";
458 
459 /* Chars that mean this number is a floating point constant.  */
460 /* As in 0f12.456  */
461 /* or	 0d1.2345e12  */
462 
463 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
464 
465 /* Prefix character that indicates the start of an immediate value.  */
466 #define is_immediate_prefix(C) ((C) == '#')
467 
468 /* Separator character handling.  */
469 
470 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
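 
/* A single test is enough here: the input scrubber has already collapsed
   runs of whitespace into single spaces (see the note in
   create_register_alias below).  */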
471 
472 static inline bfd_boolean
473 skip_past_char (char **str, char c)
474 {
475   if (**str == c)
476     {
477       (*str)++;
478       return TRUE;
479     }
480   else
481     return FALSE;
482 }
483 
484 #define skip_past_comma(str) skip_past_char (str, ',')
485 
486 /* Arithmetic expressions (possibly involving symbols).	 */
487 
488 static bfd_boolean in_my_get_expression_p = FALSE;
489 
490 /* Third argument to my_get_expression.	 */
491 #define GE_NO_PREFIX 0
492 #define GE_OPT_PREFIX 1
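 
/* With GE_OPT_PREFIX, my_get_expression accepts and skips an optional
   leading '#' immediate prefix, so both "#42" and "42" are parsed; with
   GE_NO_PREFIX no prefix character is consumed.  */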
493 
494 /* Return TRUE if the string pointed to by *STR is successfully parsed
495    as a valid expression; *EP will be filled with the information of
496    such an expression.  Otherwise return FALSE.  */
497 
498 static bfd_boolean
499 my_get_expression (expressionS * ep, char **str, int prefix_mode,
500 		   int reject_absent)
501 {
502   char *save_in;
503   segT seg;
504   int prefix_present_p = 0;
505 
506   switch (prefix_mode)
507     {
508     case GE_NO_PREFIX:
509       break;
510     case GE_OPT_PREFIX:
511       if (is_immediate_prefix (**str))
512 	{
513 	  (*str)++;
514 	  prefix_present_p = 1;
515 	}
516       break;
517     default:
518       abort ();
519     }
520 
521   memset (ep, 0, sizeof (expressionS));
522 
523   save_in = input_line_pointer;
524   input_line_pointer = *str;
525   in_my_get_expression_p = TRUE;
526   seg = expression (ep);
527   in_my_get_expression_p = FALSE;
528 
529   if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
530     {
531       /* We found a bad expression in md_operand().  */
532       *str = input_line_pointer;
533       input_line_pointer = save_in;
534       if (prefix_present_p && ! error_p ())
535 	set_fatal_syntax_error (_("bad expression"));
536       else
537 	set_first_syntax_error (_("bad expression"));
538       return FALSE;
539     }
540 
541 #ifdef OBJ_AOUT
542   if (seg != absolute_section
543       && seg != text_section
544       && seg != data_section
545       && seg != bss_section && seg != undefined_section)
546     {
547       set_syntax_error (_("bad segment"));
548       *str = input_line_pointer;
549       input_line_pointer = save_in;
550       return FALSE;
551     }
552 #else
553   (void) seg;
554 #endif
555 
556   *str = input_line_pointer;
557   input_line_pointer = save_in;
558   return TRUE;
559 }
560 
561 /* Turn a string in input_line_pointer into a floating point constant
562    of type TYPE, and store the appropriate bytes in *LITP.  The number
563    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
564    returned, or NULL on OK.  */
565 
566 char *
567 md_atof (int type, char *litP, int *sizeP)
568 {
569   return ieee_md_atof (type, litP, sizeP, target_big_endian);
570 }
571 
572 /* We handle all bad expressions here, so that we can report the faulty
573    instruction in the error message.  */
574 void
575 md_operand (expressionS * exp)
576 {
577   if (in_my_get_expression_p)
578     exp->X_op = O_illegal;
579 }
580 
581 /* Immediate values.  */
582 
583 /* Errors may be set multiple times during parsing or bit encoding
584    (particularly in the Neon bits), but usually the earliest error which is set
585    will be the most meaningful. Avoid overwriting it with later (cascading)
586    errors by calling this function.  */
587 
588 static void
589 first_error (const char *error)
590 {
591   if (! error_p ())
592     set_syntax_error (error);
593 }
594 
595 /* Similar to first_error, but this function accepts a formatted error
596    message.  */
597 static void
598 first_error_fmt (const char *format, ...)
599 {
600   va_list args;
601   enum
602   { size = 100 };
603   /* N.B. this single buffer will not cause error messages for different
604      instructions to pollute each other; this is because at the end of
605      processing of each assembly line, the error message, if any, will be
606      collected by as_bad.  */
607   static char buffer[size];
608 
609   if (! error_p ())
610     {
611       int ret ATTRIBUTE_UNUSED;
612       va_start (args, format);
613       ret = vsnprintf (buffer, size, format, args);
614       know (ret <= size - 1 && ret >= 0);
615       va_end (args);
616       set_syntax_error (buffer);
617     }
618 }
619 
620 /* Register parsing.  */
621 
622 /* Generic register parser which is called by other specialized
623    register parsers.
624    CCP points to what should be the beginning of a register name.
625    If it is indeed a valid register name, advance CCP over it and
626    return the reg_entry structure; otherwise return NULL.
627    It does not issue diagnostics.  */
628 
629 static reg_entry *
630 parse_reg (char **ccp)
631 {
632   char *start = *ccp;
633   char *p;
634   reg_entry *reg;
635 
636 #ifdef REGISTER_PREFIX
637   if (*start != REGISTER_PREFIX)
638     return NULL;
639   start++;
640 #endif
641 
642   p = start;
643   if (!ISALPHA (*p) || !is_name_beginner (*p))
644     return NULL;
645 
646   do
647     p++;
648   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
649 
650   reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
651 
652   if (!reg)
653     return NULL;
654 
655   *ccp = p;
656   return reg;
657 }
658 
659 /* Return TRUE if REG->TYPE is a valid register type for the required
660    type TYPE; otherwise return FALSE.  */
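 
/* For example, an X register (REG_TYPE_R_64) is accepted where
   REG_TYPE_R_Z_SP is required, because the R_64 mask bit is included in the
   R_Z_SP mask; an SP register is rejected where REG_TYPE_R_Z_BHSDQ_V is
   required, since the SP bits are not part of that mask.  */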
661 static bfd_boolean
662 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
663 {
664   if (reg->type == type)
665     return TRUE;
666 
667   switch (type)
668     {
669     case REG_TYPE_R64_SP:	/* 64-bit integer reg (inc SP exc XZR).  */
670     case REG_TYPE_R_Z_SP:	/* Integer reg (inc {X}SP inc [WX]ZR).  */
671     case REG_TYPE_R_Z_BHSDQ_V:	/* Any register apart from Cn.  */
672     case REG_TYPE_BHSDQ:	/* Any [BHSDQ]P FP or SIMD scalar register.  */
673     case REG_TYPE_VN:		/* Vector register.  */
674       gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
675       return ((reg_type_masks[reg->type] & reg_type_masks[type])
676 	      == reg_type_masks[reg->type]);
677     default:
678       as_fatal ("unhandled type %d", type);
679       abort ();
680     }
681 }
682 
683 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
684    Return the register number otherwise.  *ISREG32 is set to one if the
685    register is 32-bit wide; *ISREGZERO is set to one if the register is
686    of type Z_32 or Z_64.
687    Note that this function does not issue any diagnostics.  */
688 
689 static int
690 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
691 			 int *isreg32, int *isregzero)
692 {
693   char *str = *ccp;
694   const reg_entry *reg = parse_reg (&str);
695 
696   if (reg == NULL)
697     return PARSE_FAIL;
698 
699   if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
700     return PARSE_FAIL;
701 
702   switch (reg->type)
703     {
704     case REG_TYPE_SP_32:
705     case REG_TYPE_SP_64:
706       if (reject_sp)
707 	return PARSE_FAIL;
708       *isreg32 = reg->type == REG_TYPE_SP_32;
709       *isregzero = 0;
710       break;
711     case REG_TYPE_R_32:
712     case REG_TYPE_R_64:
713       *isreg32 = reg->type == REG_TYPE_R_32;
714       *isregzero = 0;
715       break;
716     case REG_TYPE_Z_32:
717     case REG_TYPE_Z_64:
718       if (reject_rz)
719 	return PARSE_FAIL;
720       *isreg32 = reg->type == REG_TYPE_Z_32;
721       *isregzero = 1;
722       break;
723     default:
724       return PARSE_FAIL;
725     }
726 
727   *ccp = str;
728 
729   return reg->number;
730 }
731 
732 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
733    Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
734    otherwise return FALSE.
735 
736    Accept only one occurrence of:
737    8b 16b 4h 8h 2s 4s 1d 2d
738    b h s d q  */
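 
/* For example, ".4s" is parsed as type NT_s with width 4 (4 x 32 = 128
   bits) and ".8b" as NT_b with width 8 (8 x 8 = 64 bits), while a bare
   ".d" is parsed as NT_d with width 0, which callers treat as a scalar
   element specifier that expects a following index.  */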
739 static bfd_boolean
740 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
741 {
742   char *ptr = *str;
743   unsigned width;
744   unsigned element_size;
745   enum neon_el_type type;
746 
747   /* skip '.' */
748   ptr++;
749 
750   if (!ISDIGIT (*ptr))
751     {
752       width = 0;
753       goto elt_size;
754     }
755   width = strtoul (ptr, &ptr, 10);
756   if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
757     {
758       first_error_fmt (_("bad size %d in vector width specifier"), width);
759       return FALSE;
760     }
761 
762 elt_size:
763   switch (TOLOWER (*ptr))
764     {
765     case 'b':
766       type = NT_b;
767       element_size = 8;
768       break;
769     case 'h':
770       type = NT_h;
771       element_size = 16;
772       break;
773     case 's':
774       type = NT_s;
775       element_size = 32;
776       break;
777     case 'd':
778       type = NT_d;
779       element_size = 64;
780       break;
781     case 'q':
782       if (width == 1)
783 	{
784 	  type = NT_q;
785 	  element_size = 128;
786 	  break;
787 	}
788       /* fall through.  */
789     default:
790       if (*ptr != '\0')
791 	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
792       else
793 	first_error (_("missing element size"));
794       return FALSE;
795     }
796   if (width != 0 && width * element_size != 64 && width * element_size != 128)
797     {
798       first_error_fmt (_
799 		       ("invalid element size %d and vector size combination %c"),
800 		       width, *ptr);
801       return FALSE;
802     }
803   ptr++;
804 
805   parsed_type->type = type;
806   parsed_type->width = width;
807 
808   *str = ptr;
809 
810   return TRUE;
811 }
812 
813 /* Parse a single type, e.g. ".8b", leading period included.
814    Only applicable to Vn registers.
815 
816    Return TRUE on success; otherwise return FALSE.  */
817 static bfd_boolean
818 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
819 {
820   char *str = *ccp;
821 
822   if (*str == '.')
823     {
824       if (! parse_neon_type_for_operand (vectype, &str))
825 	{
826 	  first_error (_("vector type expected"));
827 	  return FALSE;
828 	}
829     }
830   else
831     return FALSE;
832 
833   *ccp = str;
834 
835   return TRUE;
836 }
837 
838 /* Parse a register of the type TYPE.
839 
840    Return PARSE_FAIL if the string pointed by *CCP is not a valid register
841    name or the parsed register is not of TYPE.
842 
843    Otherwise return the register number, and optionally fill in the actual
844    type of the register in *RTYPE when multiple alternatives were given, and
845    return the register shape and element index information in *TYPEINFO.
846 
847    IN_REG_LIST should be set with TRUE if the caller is parsing a register
848    list.  */
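 
/* For example, parsing "v2.4s" returns 2 and fills *TYPEINFO with type
   NT_s, width 4 and NTA_HASTYPE set, while parsing "v2.s[1]" returns 2
   with type NT_s, width 0, NTA_HASINDEX set and index 1.  */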
849 
850 static int
851 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
852 		 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
853 {
854   char *str = *ccp;
855   const reg_entry *reg = parse_reg (&str);
856   struct neon_type_el atype;
857   struct neon_type_el parsetype;
858   bfd_boolean is_typed_vecreg = FALSE;
859 
860   atype.defined = 0;
861   atype.type = NT_invtype;
862   atype.width = -1;
863   atype.index = 0;
864 
865   if (reg == NULL)
866     {
867       if (typeinfo)
868 	*typeinfo = atype;
869       set_default_error ();
870       return PARSE_FAIL;
871     }
872 
873   if (! aarch64_check_reg_type (reg, type))
874     {
875       DEBUG_TRACE ("reg type check failed");
876       set_default_error ();
877       return PARSE_FAIL;
878     }
879   type = reg->type;
880 
881   if (type == REG_TYPE_VN
882       && parse_neon_operand_type (&parsetype, &str))
883     {
884       /* Register is of the form Vn.[bhsdq].  */
885       is_typed_vecreg = TRUE;
886 
887       if (parsetype.width == 0)
888 	/* Expect index. In the new scheme we cannot have
889 	   Vn.[bhsdq] represent a scalar. Therefore any
890 	   Vn.[bhsdq] should have an index following it.
891 	   Except in register lists, of course.  */
892 	atype.defined |= NTA_HASINDEX;
893       else
894 	atype.defined |= NTA_HASTYPE;
895 
896       atype.type = parsetype.type;
897       atype.width = parsetype.width;
898     }
899 
900   if (skip_past_char (&str, '['))
901     {
902       expressionS exp;
903 
904       /* Reject Sn[index] syntax.  */
905       if (!is_typed_vecreg)
906 	{
907 	  first_error (_("this type of register can't be indexed"));
908 	  return PARSE_FAIL;
909 	}
910 
911       if (in_reg_list == TRUE)
912 	{
913 	  first_error (_("index not allowed inside register list"));
914 	  return PARSE_FAIL;
915 	}
916 
917       atype.defined |= NTA_HASINDEX;
918 
919       my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
920 
921       if (exp.X_op != O_constant)
922 	{
923 	  first_error (_("constant expression required"));
924 	  return PARSE_FAIL;
925 	}
926 
927       if (! skip_past_char (&str, ']'))
928 	return PARSE_FAIL;
929 
930       atype.index = exp.X_add_number;
931     }
932   else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
933     {
934       /* Indexed vector register expected.  */
935       first_error (_("indexed vector register expected"));
936       return PARSE_FAIL;
937     }
938 
939   /* A vector reg Vn should be typed or indexed.  */
940   if (type == REG_TYPE_VN && atype.defined == 0)
941     {
942       first_error (_("invalid use of vector register"));
943     }
944 
945   if (typeinfo)
946     *typeinfo = atype;
947 
948   if (rtype)
949     *rtype = type;
950 
951   *ccp = str;
952 
953   return reg->number;
954 }
955 
956 /* Parse register.
957 
958    Return the register number on success; return PARSE_FAIL otherwise.
959 
960    If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
961    the register (e.g. NEON double or quad reg when either has been requested).
962 
963    If this is a NEON vector register with additional type information, fill
964    in the struct pointed to by VECTYPE (if non-NULL).
965 
966    This parser does not handle register lists.  */
967 
968 static int
969 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
970 		   aarch64_reg_type *rtype, struct neon_type_el *vectype)
971 {
972   struct neon_type_el atype;
973   char *str = *ccp;
974   int reg = parse_typed_reg (&str, type, rtype, &atype,
975 			     /*in_reg_list= */ FALSE);
976 
977   if (reg == PARSE_FAIL)
978     return PARSE_FAIL;
979 
980   if (vectype)
981     *vectype = atype;
982 
983   *ccp = str;
984 
985   return reg;
986 }
987 
988 static inline bfd_boolean
989 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
990 {
991   return
992     e1.type == e2.type
993     && e1.defined == e2.defined
994     && e1.width == e2.width && e1.index == e2.index;
995 }
996 
997 /* This function parses the NEON register list.  On success, it returns
998    the parsed register list information in the following encoded format:
999 
1000    bit   17-21   |   12-16   |   7-11    |    2-6    |   0-1
1001        4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1002 
1003    The information of the register shape and/or index is returned in
1004    *VECTYPE.
1005 
1006    It returns PARSE_FAIL if the register list is invalid.
1007 
1008    The list contains one to four registers.
1009    Each register can be one of:
1010    <Vt>.<T>[<index>]
1011    <Vt>.<T>
1012    All <T> should be identical.
1013    All <index> should be identical.
1014    There are restrictions on <Vt> numbers which are checked later
1015    (by reg_list_valid_p).  */
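 
/* For example, "{v2.8b, v3.8b}" is returned as ((3 << 5 | 2) << 2) | 1:
   num_of_reg is 1 (meaning two registers), the 1st regno 2 sits in bits
   2-6 and the 2nd regno 3 in bits 7-11, while *VECTYPE records the ".8b"
   arrangement shared by the list.  */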
1016 
1017 static int
1018 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1019 {
1020   char *str = *ccp;
1021   int nb_regs;
1022   struct neon_type_el typeinfo, typeinfo_first;
1023   int val, val_range;
1024   int in_range;
1025   int ret_val;
1026   int i;
1027   bfd_boolean error = FALSE;
1028   bfd_boolean expect_index = FALSE;
1029 
1030   if (*str != '{')
1031     {
1032       set_syntax_error (_("expecting {"));
1033       return PARSE_FAIL;
1034     }
1035   str++;
1036 
1037   nb_regs = 0;
1038   typeinfo_first.defined = 0;
1039   typeinfo_first.type = NT_invtype;
1040   typeinfo_first.width = -1;
1041   typeinfo_first.index = 0;
1042   ret_val = 0;
1043   val = -1;
1044   val_range = -1;
1045   in_range = 0;
1046   do
1047     {
1048       if (in_range)
1049 	{
1050 	  str++;		/* skip over '-' */
1051 	  val_range = val;
1052 	}
1053       val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1054 			     /*in_reg_list= */ TRUE);
1055       if (val == PARSE_FAIL)
1056 	{
1057 	  set_first_syntax_error (_("invalid vector register in list"));
1058 	  error = TRUE;
1059 	  continue;
1060 	}
1061       /* reject [bhsd]n */
1062       if (typeinfo.defined == 0)
1063 	{
1064 	  set_first_syntax_error (_("invalid scalar register in list"));
1065 	  error = TRUE;
1066 	  continue;
1067 	}
1068 
1069       if (typeinfo.defined & NTA_HASINDEX)
1070 	expect_index = TRUE;
1071 
1072       if (in_range)
1073 	{
1074 	  if (val < val_range)
1075 	    {
1076 	      set_first_syntax_error
1077 		(_("invalid range in vector register list"));
1078 	      error = TRUE;
1079 	    }
1080 	  val_range++;
1081 	}
1082       else
1083 	{
1084 	  val_range = val;
1085 	  if (nb_regs == 0)
1086 	    typeinfo_first = typeinfo;
1087 	  else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1088 	    {
1089 	      set_first_syntax_error
1090 		(_("type mismatch in vector register list"));
1091 	      error = TRUE;
1092 	    }
1093 	}
1094       if (! error)
1095 	for (i = val_range; i <= val; i++)
1096 	  {
1097 	    ret_val |= i << (5 * nb_regs);
1098 	    nb_regs++;
1099 	  }
1100       in_range = 0;
1101     }
1102   while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1103 
1104   skip_whitespace (str);
1105   if (*str != '}')
1106     {
1107       set_first_syntax_error (_("end of vector register list not found"));
1108       error = TRUE;
1109     }
1110   str++;
1111 
1112   skip_whitespace (str);
1113 
1114   if (expect_index)
1115     {
1116       if (skip_past_char (&str, '['))
1117 	{
1118 	  expressionS exp;
1119 
1120 	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1121 	  if (exp.X_op != O_constant)
1122 	    {
1123 	      set_first_syntax_error (_("constant expression required."));
1124 	      error = TRUE;
1125 	    }
1126 	  if (! skip_past_char (&str, ']'))
1127 	    error = TRUE;
1128 	  else
1129 	    typeinfo_first.index = exp.X_add_number;
1130 	}
1131       else
1132 	{
1133 	  set_first_syntax_error (_("expected index"));
1134 	  error = TRUE;
1135 	}
1136     }
1137 
1138   if (nb_regs > 4)
1139     {
1140       set_first_syntax_error (_("too many registers in vector register list"));
1141       error = TRUE;
1142     }
1143   else if (nb_regs == 0)
1144     {
1145       set_first_syntax_error (_("empty vector register list"));
1146       error = TRUE;
1147     }
1148 
1149   *ccp = str;
1150   if (! error)
1151     *vectype = typeinfo_first;
1152 
1153   return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1154 }
1155 
1156 /* Directives: register aliases.  */
1157 
1158 static reg_entry *
1159 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1160 {
1161   reg_entry *new;
1162   const char *name;
1163 
1164   if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1165     {
1166       if (new->builtin)
1167 	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1168 		 str);
1169 
1170       /* Only warn about a redefinition if it's not defined as the
1171          same register.  */
1172       else if (new->number != number || new->type != type)
1173 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
1174 
1175       return NULL;
1176     }
1177 
1178   name = xstrdup (str);
1179   new = xmalloc (sizeof (reg_entry));
1180 
1181   new->name = name;
1182   new->number = number;
1183   new->type = type;
1184   new->builtin = FALSE;
1185 
1186   if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1187     abort ();
1188 
1189   return new;
1190 }
1191 
1192 /* Look for the .req directive.	 This is of the form:
1193 
1194 	new_register_name .req existing_register_name
1195 
1196    If we find one, or if it looks sufficiently like one that we want to
1197    handle any error here, return TRUE.  Otherwise return FALSE.  */
1198 
1199 static bfd_boolean
1200 create_register_alias (char *newname, char *p)
1201 {
1202   const reg_entry *old;
1203   char *oldname, *nbuf;
1204   size_t nlen;
1205 
1206   /* The input scrubber ensures that whitespace after the mnemonic is
1207      collapsed to single spaces.  */
1208   oldname = p;
1209   if (strncmp (oldname, " .req ", 6) != 0)
1210     return FALSE;
1211 
1212   oldname += 6;
1213   if (*oldname == '\0')
1214     return FALSE;
1215 
1216   old = hash_find (aarch64_reg_hsh, oldname);
1217   if (!old)
1218     {
1219       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1220       return TRUE;
1221     }
1222 
1223   /* If TC_CASE_SENSITIVE is defined, then newname already points to
1224      the desired alias name, and p points to its end.  If not, then
1225      the desired alias name is in the global original_case_string.  */
1226 #ifdef TC_CASE_SENSITIVE
1227   nlen = p - newname;
1228 #else
1229   newname = original_case_string;
1230   nlen = strlen (newname);
1231 #endif
1232 
1233   nbuf = alloca (nlen + 1);
1234   memcpy (nbuf, newname, nlen);
1235   nbuf[nlen] = '\0';
1236 
1237   /* Create aliases under the new name as stated; an all-lowercase
1238      version of the new name; and an all-uppercase version of the new
1239      name.  */
1240   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1241     {
1242       for (p = nbuf; *p; p++)
1243 	*p = TOUPPER (*p);
1244 
1245       if (strncmp (nbuf, newname, nlen))
1246 	{
1247 	  /* If this attempt to create an additional alias fails, do not bother
1248 	     trying to create the all-lower case alias.  We will fail and issue
1249 	     a second, duplicate error message.  This situation arises when the
1250 	     programmer does something like:
1251 	     foo .req r0
1252 	     Foo .req r1
1253 	     The second .req creates the "Foo" alias but then fails to create
1254 	     the artificial FOO alias because it has already been created by the
1255 	     first .req.  */
1256 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1257 	    return TRUE;
1258 	}
1259 
1260       for (p = nbuf; *p; p++)
1261 	*p = TOLOWER (*p);
1262 
1263       if (strncmp (nbuf, newname, nlen))
1264 	insert_reg_alias (nbuf, old->number, old->type);
1265     }
1266 
1267   return TRUE;
1268 }
1269 
1270 /* Should never be called, as .req goes between the alias and the
1271    register name, not at the beginning of the line.  */
1272 static void
1273 s_req (int a ATTRIBUTE_UNUSED)
1274 {
1275   as_bad (_("invalid syntax for .req directive"));
1276 }
1277 
1278 /* The .unreq directive deletes an alias which was previously defined
1279    by .req.  For example:
1280 
1281        my_alias .req r11
1282        .unreq my_alias	  */
1283 
1284 static void
1285 s_unreq (int a ATTRIBUTE_UNUSED)
1286 {
1287   char *name;
1288   char saved_char;
1289 
1290   name = input_line_pointer;
1291 
1292   while (*input_line_pointer != 0
1293 	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1294     ++input_line_pointer;
1295 
1296   saved_char = *input_line_pointer;
1297   *input_line_pointer = 0;
1298 
1299   if (!*name)
1300     as_bad (_("invalid syntax for .unreq directive"));
1301   else
1302     {
1303       reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1304 
1305       if (!reg)
1306 	as_bad (_("unknown register alias '%s'"), name);
1307       else if (reg->builtin)
1308 	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1309 		 name);
1310       else
1311 	{
1312 	  char *p;
1313 	  char *nbuf;
1314 
1315 	  hash_delete (aarch64_reg_hsh, name, FALSE);
1316 	  free ((char *) reg->name);
1317 	  free (reg);
1318 
1319 	  /* Also locate the all upper case and all lower case versions.
1320 	     Do not complain if we cannot find one or the other as it
1321 	     was probably deleted above.  */
1322 
1323 	  nbuf = strdup (name);
1324 	  for (p = nbuf; *p; p++)
1325 	    *p = TOUPPER (*p);
1326 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1327 	  if (reg)
1328 	    {
1329 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1330 	      free ((char *) reg->name);
1331 	      free (reg);
1332 	    }
1333 
1334 	  for (p = nbuf; *p; p++)
1335 	    *p = TOLOWER (*p);
1336 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1337 	  if (reg)
1338 	    {
1339 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1340 	      free ((char *) reg->name);
1341 	      free (reg);
1342 	    }
1343 
1344 	  free (nbuf);
1345 	}
1346     }
1347 
1348   *input_line_pointer = saved_char;
1349   demand_empty_rest_of_line ();
1350 }
1351 
1352 /* Directives: Instruction set selection.  */
1353 
1354 #ifdef OBJ_ELF
1355 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1356    spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1357    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1358    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
1359 
1360 /* Create a new mapping symbol for the transition to STATE.  */
1361 
1362 static void
1363 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1364 {
1365   symbolS *symbolP;
1366   const char *symname;
1367   int type;
1368 
1369   switch (state)
1370     {
1371     case MAP_DATA:
1372       symname = "$d";
1373       type = BSF_NO_FLAGS;
1374       break;
1375     case MAP_INSN:
1376       symname = "$x";
1377       type = BSF_NO_FLAGS;
1378       break;
1379     default:
1380       abort ();
1381     }
1382 
1383   symbolP = symbol_new (symname, now_seg, value, frag);
1384   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1385 
1386   /* Save the mapping symbols for future reference.  Also check that
1387      we do not place two mapping symbols at the same offset within a
1388      frag.  We'll handle overlap between frags in
1389      check_mapping_symbols.
1390 
1391      If .fill or other data filling directive generates zero sized data,
1392      the mapping symbol for the following code will have the same value
1393      as the one generated for the data filling directive.  In this case,
1394      we replace the old symbol with the new one at the same address.  */
1395   if (value == 0)
1396     {
1397       if (frag->tc_frag_data.first_map != NULL)
1398 	{
1399 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1400 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1401 			 &symbol_lastP);
1402 	}
1403       frag->tc_frag_data.first_map = symbolP;
1404     }
1405   if (frag->tc_frag_data.last_map != NULL)
1406     {
1407       know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1408 	    S_GET_VALUE (symbolP));
1409       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1410 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1411 		       &symbol_lastP);
1412     }
1413   frag->tc_frag_data.last_map = symbolP;
1414 }
1415 
1416 /* We must sometimes convert a region marked as code to data during
1417    code alignment, if an odd number of bytes has to be padded.  The
1418    code mapping symbol is pushed to an aligned address.  */
1419 
1420 static void
1421 insert_data_mapping_symbol (enum mstate state,
1422 			    valueT value, fragS * frag, offsetT bytes)
1423 {
1424   /* If there was already a mapping symbol, remove it.  */
1425   if (frag->tc_frag_data.last_map != NULL
1426       && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1427       frag->fr_address + value)
1428     {
1429       symbolS *symp = frag->tc_frag_data.last_map;
1430 
1431       if (value == 0)
1432 	{
1433 	  know (frag->tc_frag_data.first_map == symp);
1434 	  frag->tc_frag_data.first_map = NULL;
1435 	}
1436       frag->tc_frag_data.last_map = NULL;
1437       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1438     }
1439 
1440   make_mapping_symbol (MAP_DATA, value, frag);
1441   make_mapping_symbol (state, value + bytes, frag);
1442 }
1443 
1444 static void mapping_state_2 (enum mstate state, int max_chars);
1445 
1446 /* Set the mapping state to STATE.  Only call this when about to
1447    emit some STATE bytes to the file.  */
1448 
1449 void
1450 mapping_state (enum mstate state)
1451 {
1452   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1453 
1454 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1455 
1456   if (mapstate == state)
1457     /* The mapping symbol has already been emitted.
1458        There is nothing else to do.  */
1459     return;
1460   else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1461     /* This case will be evaluated later in the next else.  */
1462     return;
1463   else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1464     {
1465       /* Only add the symbol if the offset is > 0:
1466          if we're at the first frag, check its size > 0;
1467          if we're not at the first frag, then for sure
1468          the offset is > 0.  */
1469       struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1470       const int add_symbol = (frag_now != frag_first)
1471 	|| (frag_now_fix () > 0);
1472 
1473       if (add_symbol)
1474 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1475     }
1476 
1477   mapping_state_2 (state, 0);
1478 #undef TRANSITION
1479 }
1480 
1481 /* Same as mapping_state, but MAX_CHARS bytes have already been
1482    allocated.  Put the mapping symbol that far back.  */
1483 
1484 static void
1485 mapping_state_2 (enum mstate state, int max_chars)
1486 {
1487   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1488 
1489   if (!SEG_NORMAL (now_seg))
1490     return;
1491 
1492   if (mapstate == state)
1493     /* The mapping symbol has already been emitted.
1494        There is nothing else to do.  */
1495     return;
1496 
1497   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1498   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1499 }
1500 #else
1501 #define mapping_state(x)	/* nothing */
1502 #define mapping_state_2(x, y)	/* nothing */
1503 #endif
1504 
1505 /* Directives: sectioning and alignment.  */
1506 
1507 static void
1508 s_bss (int ignore ATTRIBUTE_UNUSED)
1509 {
1510   /* We don't support putting frags in the BSS segment; we fake it by
1511      marking in_bss, then looking at s_skip for clues.  */
1512   subseg_set (bss_section, 0);
1513   demand_empty_rest_of_line ();
1514   mapping_state (MAP_DATA);
1515 }
1516 
1517 static void
1518 s_even (int ignore ATTRIBUTE_UNUSED)
1519 {
1520   /* Never make a frag if we expect an extra pass.  */
1521   if (!need_pass_2)
1522     frag_align (1, 0, 0);
1523 
1524   record_alignment (now_seg, 1);
1525 
1526   demand_empty_rest_of_line ();
1527 }
1528 
1529 /* Directives: Literal pools.  */
1530 
1531 static literal_pool *
1532 find_literal_pool (int size)
1533 {
1534   literal_pool *pool;
1535 
1536   for (pool = list_of_pools; pool != NULL; pool = pool->next)
1537     {
1538       if (pool->section == now_seg
1539 	  && pool->sub_section == now_subseg && pool->size == size)
1540 	break;
1541     }
1542 
1543   return pool;
1544 }
1545 
1546 static literal_pool *
1547 find_or_make_literal_pool (int size)
1548 {
1549   /* Next literal pool ID number.  */
1550   static unsigned int latest_pool_num = 1;
1551   literal_pool *pool;
1552 
1553   pool = find_literal_pool (size);
1554 
1555   if (pool == NULL)
1556     {
1557       /* Create a new pool.  */
1558       pool = xmalloc (sizeof (*pool));
1559       if (!pool)
1560 	return NULL;
1561 
1562       /* Currently we always put the literal pool in the current text
1563          section.  If we were generating "small" model code where we
1564          knew that all code and initialised data was within 1MB then
1565          we could output literals to mergeable, read-only data
1566          sections. */
1567 
1568       pool->next_free_entry = 0;
1569       pool->section = now_seg;
1570       pool->sub_section = now_subseg;
1571       pool->size = size;
1572       pool->next = list_of_pools;
1573       pool->symbol = NULL;
1574 
1575       /* Add it to the list.  */
1576       list_of_pools = pool;
1577     }
1578 
1579   /* New pools, and emptied pools, will have a NULL symbol.  */
1580   if (pool->symbol == NULL)
1581     {
1582       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1583 				    (valueT) 0, &zero_address_frag);
1584       pool->id = latest_pool_num++;
1585     }
1586 
1587   /* Done.  */
1588   return pool;
1589 }
1590 
1591 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1592    Return TRUE on success, otherwise return FALSE.  */
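/* N.B. on success *EXP is rewritten as an O_symbol expression referring to
   the pool's symbol at offset entry * size, so the caller ends up
   addressing the literal pool slot rather than the original value.  */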
1593 static bfd_boolean
1594 add_to_lit_pool (expressionS *exp, int size)
1595 {
1596   literal_pool *pool;
1597   unsigned int entry;
1598 
1599   pool = find_or_make_literal_pool (size);
1600 
1601   /* Check if this literal value is already in the pool.  */
1602   for (entry = 0; entry < pool->next_free_entry; entry++)
1603     {
1604       if ((pool->literals[entry].X_op == exp->X_op)
1605 	  && (exp->X_op == O_constant)
1606 	  && (pool->literals[entry].X_add_number == exp->X_add_number)
1607 	  && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1608 	break;
1609 
1610       if ((pool->literals[entry].X_op == exp->X_op)
1611 	  && (exp->X_op == O_symbol)
1612 	  && (pool->literals[entry].X_add_number == exp->X_add_number)
1613 	  && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1614 	  && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1615 	break;
1616     }
1617 
1618   /* Do we need to create a new entry?  */
1619   if (entry == pool->next_free_entry)
1620     {
1621       if (entry >= MAX_LITERAL_POOL_SIZE)
1622 	{
1623 	  set_syntax_error (_("literal pool overflow"));
1624 	  return FALSE;
1625 	}
1626 
1627       pool->literals[entry] = *exp;
1628       pool->next_free_entry += 1;
1629     }
1630 
1631   exp->X_op = O_symbol;
1632   exp->X_add_number = ((int) entry) * size;
1633   exp->X_add_symbol = pool->symbol;
1634 
1635   return TRUE;
1636 }
1637 
1638 /* Can't use symbol_new here, so have to create a symbol and then at
1639    a later date assign it a value.  That's what these functions do.  */
1640 
1641 static void
1642 symbol_locate (symbolS * symbolP,
1643 	       const char *name,/* It is copied, the caller can modify.  */
1644 	       segT segment,	/* Segment identifier (SEG_<something>).  */
1645 	       valueT valu,	/* Symbol value.  */
1646 	       fragS * frag)	/* Associated fragment.  */
1647 {
1648   unsigned int name_length;
1649   char *preserved_copy_of_name;
1650 
1651   name_length = strlen (name) + 1;	/* +1 for \0.  */
1652   obstack_grow (&notes, name, name_length);
1653   preserved_copy_of_name = obstack_finish (&notes);
1654 
1655 #ifdef tc_canonicalize_symbol_name
1656   preserved_copy_of_name =
1657     tc_canonicalize_symbol_name (preserved_copy_of_name);
1658 #endif
1659 
1660   S_SET_NAME (symbolP, preserved_copy_of_name);
1661 
1662   S_SET_SEGMENT (symbolP, segment);
1663   S_SET_VALUE (symbolP, valu);
1664   symbol_clear_list_pointers (symbolP);
1665 
1666   symbol_set_frag (symbolP, frag);
1667 
1668   /* Link to end of symbol chain.  */
1669   {
1670     extern int symbol_table_frozen;
1671 
1672     if (symbol_table_frozen)
1673       abort ();
1674   }
1675 
1676   symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1677 
1678   obj_symbol_new_hook (symbolP);
1679 
1680 #ifdef tc_symbol_new_hook
1681   tc_symbol_new_hook (symbolP);
1682 #endif
1683 
1684 #ifdef DEBUG_SYMS
1685   verify_symbol_chain (symbol_rootP, symbol_lastP);
1686 #endif /* DEBUG_SYMS  */
1687 }
1688 
1689 
1690 static void
1691 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1692 {
1693   unsigned int entry;
1694   literal_pool *pool;
1695   char sym_name[20];
1696   int align;
1697 
1698   for (align = 2; align <= 4; align++)
1699     {
1700       int size = 1 << align;
1701 
1702       pool = find_literal_pool (size);
1703       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1704 	continue;
1705 
1706       mapping_state (MAP_DATA);
1707 
1708       /* Align the pool to the size of its entries.
1709          Only make a frag if we have to.  */
1710       if (!need_pass_2)
1711 	frag_align (align, 0, 0);
1712 
1713       record_alignment (now_seg, align);
1714 
1715       sprintf (sym_name, "$$lit_\002%x", pool->id);
1716 
1717       symbol_locate (pool->symbol, sym_name, now_seg,
1718 		     (valueT) frag_now_fix (), frag_now);
1719       symbol_table_insert (pool->symbol);
1720 
1721       for (entry = 0; entry < pool->next_free_entry; entry++)
1722 	/* First output the expression in the instruction to the pool.  */
1723 	emit_expr (&(pool->literals[entry]), size);	/* .word|.xword  */
1724 
1725       /* Mark the pool as empty.  */
1726       pool->next_free_entry = 0;
1727       pool->symbol = NULL;
1728     }
1729 }
1730 
1731 #ifdef OBJ_ELF
1732 /* Forward declarations for functions below, in the MD interface
1733    section.  */
1734 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1735 static struct reloc_table_entry * find_reloc_table_entry (char **);
1736 
1737 /* Directives: Data.  */
1738 /* N.B. the support for relocation suffix in this directive needs to be
1739    implemented properly.  */
1740 
1741 static void
1742 s_aarch64_elf_cons (int nbytes)
1743 {
1744   expressionS exp;
1745 
1746 #ifdef md_flush_pending_output
1747   md_flush_pending_output ();
1748 #endif
1749 
1750   if (is_it_end_of_statement ())
1751     {
1752       demand_empty_rest_of_line ();
1753       return;
1754     }
1755 
1756 #ifdef md_cons_align
1757   md_cons_align (nbytes);
1758 #endif
1759 
1760   mapping_state (MAP_DATA);
1761   do
1762     {
1763       struct reloc_table_entry *reloc;
1764 
1765       expression (&exp);
1766 
1767       if (exp.X_op != O_symbol)
1768 	emit_expr (&exp, (unsigned int) nbytes);
1769       else
1770 	{
1771 	  skip_past_char (&input_line_pointer, '#');
1772 	  if (skip_past_char (&input_line_pointer, ':'))
1773 	    {
1774 	      reloc = find_reloc_table_entry (&input_line_pointer);
1775 	      if (reloc == NULL)
1776 		as_bad (_("unrecognized relocation suffix"));
1777 	      else
1778 		as_bad (_("unimplemented relocation suffix"));
1779 	      ignore_rest_of_line ();
1780 	      return;
1781 	    }
1782 	  else
1783 	    emit_expr (&exp, (unsigned int) nbytes);
1784 	}
1785     }
1786   while (*input_line_pointer++ == ',');
1787 
1788   /* Put terminator back into stream.  */
1789   input_line_pointer--;
1790   demand_empty_rest_of_line ();
1791 }
1792 
1793 #endif /* OBJ_ELF */
1794 
1795 /* Output a 32-bit word, but mark as an instruction.  */
1796 
1797 static void
1798 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1799 {
1800   expressionS exp;
1801 
1802 #ifdef md_flush_pending_output
1803   md_flush_pending_output ();
1804 #endif
1805 
1806   if (is_it_end_of_statement ())
1807     {
1808       demand_empty_rest_of_line ();
1809       return;
1810     }
1811 
1812   if (!need_pass_2)
1813     frag_align_code (2, 0);
1814 #ifdef OBJ_ELF
1815   mapping_state (MAP_INSN);
1816 #endif
1817 
1818   do
1819     {
1820       expression (&exp);
1821       if (exp.X_op != O_constant)
1822 	{
1823 	  as_bad (_("constant expression required"));
1824 	  ignore_rest_of_line ();
1825 	  return;
1826 	}
1827 
1828       if (target_big_endian)
1829 	{
1830 	  unsigned int val = exp.X_add_number;
1831 	  exp.X_add_number = SWAP_32 (val);
1832 	}
1833       emit_expr (&exp, 4);
1834     }
1835   while (*input_line_pointer++ == ',');
1836 
1837   /* Put terminator back into stream.  */
1838   input_line_pointer--;
1839   demand_empty_rest_of_line ();
1840 }
1841 
1842 #ifdef OBJ_ELF
1843 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
1844 
1845 static void
1846 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1847 {
1848   expressionS exp;
1849 
1850   /* Since we're just labelling the code, there's no need to define a
1851      mapping symbol.  */
1852   expression (&exp);
1853   /* Make sure there is enough room in this frag for the following
1854      blr.  This trick only works if the blr follows immediately after
1855      the .tlsdesc directive.  */
1856   frag_grow (4);
1857   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1858 		   BFD_RELOC_AARCH64_TLSDESC_CALL);
1859 
1860   demand_empty_rest_of_line ();
1861 }
1862 #endif	/* OBJ_ELF */
1863 
1864 static void s_aarch64_arch (int);
1865 static void s_aarch64_cpu (int);
1866 
1867 /* This table describes all the machine specific pseudo-ops the assembler
1868    has to support.  The fields are:
1869      pseudo-op name without dot
1870      function to call to execute this pseudo-op
1871      Integer arg to pass to the function.  */
1872 
1873 const pseudo_typeS md_pseudo_table[] = {
1874   /* Never called because '.req' does not start a line.  */
1875   {"req", s_req, 0},
1876   {"unreq", s_unreq, 0},
1877   {"bss", s_bss, 0},
1878   {"even", s_even, 0},
1879   {"ltorg", s_ltorg, 0},
1880   {"pool", s_ltorg, 0},
1881   {"cpu", s_aarch64_cpu, 0},
1882   {"arch", s_aarch64_arch, 0},
1883   {"inst", s_aarch64_inst, 0},
1884 #ifdef OBJ_ELF
1885   {"tlsdesccall", s_tlsdesccall, 0},
1886   {"word", s_aarch64_elf_cons, 4},
1887   {"long", s_aarch64_elf_cons, 4},
1888   {"xword", s_aarch64_elf_cons, 8},
1889   {"dword", s_aarch64_elf_cons, 8},
1890 #endif
1891   {0, 0, 0}
1892 };
1893 
1894 
1895 /* Check whether STR points to a register name followed by a comma or the
1896    end of line; REG_TYPE indicates which register types are checked
1897    against.  Return TRUE if STR is such a register name; otherwise return
1898    FALSE.  The function is not meant to produce any diagnostics, but since
1899    the register parser aarch64_reg_parse, which it calls, does produce
1900    diagnostics, we call clear_error to discard any diagnostics that
1901    aarch64_reg_parse may have generated.
1902    The function also returns FALSE immediately if a user error is already
1903    pending on entry, so that the existing diagnostics state is not
1904    spoiled.
1905    The function currently serves parse_constant_immediate and
1906    parse_big_immediate only.  */
1907 static bfd_boolean
1908 reg_name_p (char *str, aarch64_reg_type reg_type)
1909 {
1910   int reg;
1911 
1912   /* Prevent the diagnostics state from being spoiled.  */
1913   if (error_p ())
1914     return FALSE;
1915 
1916   reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1917 
1918   /* Clear the parsing error that may be set by the reg parser.  */
1919   clear_error ();
1920 
1921   if (reg == PARSE_FAIL)
1922     return FALSE;
1923 
1924   skip_whitespace (str);
1925   if (*str == ',' || is_end_of_line[(unsigned int) *str])
1926     return TRUE;
1927 
1928   return FALSE;
1929 }
1930 
1931 /* Parser functions used exclusively in instruction operands.  */
1932 
1933 /* Parse an immediate expression which need not be constant.
1934 
1935    To prevent the expression parser from pushing a register name
1936    into the symbol table as an undefined symbol, a check is first
1937    made to determine whether STR is a valid register name followed
1938    by a comma or the end of line.  Return FALSE if STR is such a
1939    string.  */
1940 
1941 static bfd_boolean
1942 parse_immediate_expression (char **str, expressionS *exp)
1943 {
1944   if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1945     {
1946       set_recoverable_error (_("immediate operand required"));
1947       return FALSE;
1948     }
1949 
1950   my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1951 
1952   if (exp->X_op == O_absent)
1953     {
1954       set_fatal_syntax_error (_("missing immediate expression"));
1955       return FALSE;
1956     }
1957 
1958   return TRUE;
1959 }
1960 
1961 /* Constant immediate-value read function for use in insn parsing.
1962    STR points to the beginning of the immediate (with the optional
1963    leading #); *VAL receives the value.
1964 
1965    Return TRUE on success; otherwise return FALSE.  */
1966 
1967 static bfd_boolean
1968 parse_constant_immediate (char **str, int64_t * val)
1969 {
1970   expressionS exp;
1971 
1972   if (! parse_immediate_expression (str, &exp))
1973     return FALSE;
1974 
1975   if (exp.X_op != O_constant)
1976     {
1977       set_syntax_error (_("constant expression required"));
1978       return FALSE;
1979     }
1980 
1981   *val = exp.X_add_number;
1982   return TRUE;
1983 }
1984 
1985 static uint32_t
1986 encode_imm_float_bits (uint32_t imm)
1987 {
1988   return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
1989     | ((imm >> (31 - 7)) & 0x80);	/* b[31]    -> b[7]   */
1990 }
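/* Worked example (illustrative): the single-precision value 1.0f has the
   IEEE754 encoding 0x3f800000; the function above extracts bits [25:19]
   (0x70) and bit 31 (0), giving the 8-bit immediate 0x70.  For -1.0f
   (0xbf800000) the sign bit contributes 0x80, giving 0xf0.  */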
1991 
1992 /* Return TRUE if the single-precision floating-point value encoded in IMM
1993    can be expressed in the AArch64 8-bit signed floating-point format with
1994    3-bit exponent and normalized 4 bits of precision; in other words, the
1995    floating-point value must be expressible as
1996      (+/-) n / 16 * power (2, r)
1997    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
1998 
1999 static bfd_boolean
2000 aarch64_imm_float_p (uint32_t imm)
2001 {
2002   /* If a single-precision floating-point value has the following bit
2003      pattern, it can be expressed in the AArch64 8-bit floating-point
2004      format:
2005 
2006      3 32222222 2221111111111
2007      1 09876543 21098765432109876543210
2008      n Eeeeeexx xxxx0000000000000000000
2009 
2010      where n, e and each x are either 0 or 1 independently, with
2011      E == ~ e.  */
2012 
2013   uint32_t pattern;
2014 
2015   /* Prepare the pattern for 'Eeeeee'.  */
2016   if (((imm >> 30) & 0x1) == 0)
2017     pattern = 0x3e000000;
2018   else
2019     pattern = 0x40000000;
2020 
2021   return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
2022     && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
2023 }
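/* Worked examples (illustrative): 1.0f (0x3f800000) and 10.0f (0x41200000)
   satisfy the test above (lower 19 bits zero, bits 25-29 the complement of
   bit 30), so both are representable; pi (0x40490fdb) has non-zero low
   bits and is rejected.  */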
2024 
2025 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2026 
2027    Return TRUE if the value encoded in IMM can be expressed in the AArch64
2028    8-bit signed floating-point format with 3-bit exponent and normalized 4
2029    bits of precision (i.e. can be used in an FMOV instruction); return the
2030    equivalent single-precision encoding in *FPWORD.
2031 
2032    Otherwise return FALSE.  */
2033 
2034 static bfd_boolean
2035 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2036 {
2037   /* If a double-precision floating-point value has the following bit
2038      pattern, it can be expressed in the AArch64 8-bit floating-point
2039      format:
2040 
2041      6 66655555555 554444444...21111111111
2042      3 21098765432 109876543...098765432109876543210
2043      n Eeeeeeeeexx xxxx00000...000000000000000000000
2044 
2045      where n, e and each x are either 0 or 1 independently, with
2046      E == ~ e.  */
2047 
2048   uint32_t pattern;
2049   uint32_t high32 = imm >> 32;
2050 
2051   /* Lower 32 bits need to be 0s.  */
2052   if ((imm & 0xffffffff) != 0)
2053     return FALSE;
2054 
2055   /* Prepare the pattern for 'Eeeeeeeee'.  */
2056   if (((high32 >> 30) & 0x1) == 0)
2057     pattern = 0x3fc00000;
2058   else
2059     pattern = 0x40000000;
2060 
2061   if ((high32 & 0xffff) == 0			/* bits 32 - 47 are 0.  */
2062       && (high32 & 0x7fc00000) == pattern)	/* bits 54 - 61 == ~ bit 62.  */
2063     {
2064       /* Convert to the single-precision encoding.
2065          i.e. convert
2066 	   n Eeeeeeeeexx xxxx00000...000000000000000000000
2067 	 to
2068 	   n Eeeeeexx xxxx0000000000000000000.  */
2069       *fpword = ((high32 & 0xfe000000)			/* nEeeeee.  */
2070 		 | (((high32 >> 16) & 0x3f) << 19));	/* xxxxxx.  */
2071       return TRUE;
2072     }
2073   else
2074     return FALSE;
2075 }
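/* Worked example (illustrative): the double-precision value 1.0
   (0x3ff0000000000000) passes the checks above and is folded to the
   single-precision pattern 0x3f800000 in *FPWORD; 0x3ff0000000000001
   fails because its low 32 bits are not zero.  */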
2076 
2077 /* Parse a floating-point immediate.  Return TRUE on success and return the
2078    value in *IMMED in the format of IEEE754 single-precision encoding.
2079    *CCP points to the start of the string; DP_P is TRUE when the immediate
2080    is expected to be in double-precision (N.B. this only matters when
2081    hexadecimal representation is involved).
2082 
2083    N.B. 0.0 is accepted by this function.  */
2084 
2085 static bfd_boolean
2086 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2087 {
2088   char *str = *ccp;
2089   char *fpnum;
2090   LITTLENUM_TYPE words[MAX_LITTLENUMS];
2091   int found_fpchar = 0;
2092   int64_t val = 0;
2093   unsigned fpword = 0;
2094   bfd_boolean hex_p = FALSE;
2095 
2096   skip_past_char (&str, '#');
2097 
2098   fpnum = str;
2099   skip_whitespace (fpnum);
2100 
2101   if (strncmp (fpnum, "0x", 2) == 0)
2102     {
2103       /* Support the hexadecimal representation of the IEEE754 encoding.
2104 	 Double-precision is expected when DP_P is TRUE, otherwise the
2105 	 representation should be in single-precision.  */
2106       if (! parse_constant_immediate (&str, &val))
2107 	goto invalid_fp;
2108 
2109       if (dp_p)
2110 	{
2111 	  if (! aarch64_double_precision_fmovable (val, &fpword))
2112 	    goto invalid_fp;
2113 	}
2114       else if ((uint64_t) val > 0xffffffff)
2115 	goto invalid_fp;
2116       else
2117 	fpword = val;
2118 
2119       hex_p = TRUE;
2120     }
2121   else
2122     {
2123       /* We must not accidentally parse an integer as a floating-point number.
2124 	 Make sure that the value we parse is not an integer by checking for
2125 	 special characters '.' or 'e'.  */
2126       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2127 	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2128 	  {
2129 	    found_fpchar = 1;
2130 	    break;
2131 	  }
2132 
2133       if (!found_fpchar)
2134 	return FALSE;
2135     }
2136 
2137   if (! hex_p)
2138     {
2139       int i;
2140 
2141       if ((str = atof_ieee (str, 's', words)) == NULL)
2142 	goto invalid_fp;
2143 
2144       /* Our FP word must be 32 bits (single-precision FP).  */
2145       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2146 	{
2147 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
2148 	  fpword |= words[i];
2149 	}
2150     }
2151 
2152   if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2153     {
2154       *immed = fpword;
2155       *ccp = str;
2156       return TRUE;
2157     }
2158 
2159 invalid_fp:
2160   set_fatal_syntax_error (_("invalid floating-point constant"));
2161   return FALSE;
2162 }
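/* For illustration, immediates may be written either as decimal
   floating-point text or as the raw IEEE754 encoding in hexadecimal,
   e.g. (sketch):

     fmov  s0, #1.0
     fmov  s0, #0x3f800000          // same value, hex encoding
     fmov  d0, #0x3ff0000000000000  // double-precision hex when DP_P

   A plain integer such as #3 contains neither '.' nor 'e' and is
   therefore not treated as a floating-point constant by this function.  */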
2163 
2164 /* Less-generic immediate-value read function with the possibility of loading
2165    a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2166    instructions.
2167 
2168    To prevent the expression parser from pushing a register name into the
2169    symbol table as an undefined symbol, a check is first made to determine
2170    whether STR is a valid register name followed by a comma or the end
2171    of line.  Return FALSE if STR is such a register.  */
2172 
2173 static bfd_boolean
2174 parse_big_immediate (char **str, int64_t *imm)
2175 {
2176   char *ptr = *str;
2177 
2178   if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2179     {
2180       set_syntax_error (_("immediate operand required"));
2181       return FALSE;
2182     }
2183 
2184   my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2185 
2186   if (inst.reloc.exp.X_op == O_constant)
2187     *imm = inst.reloc.exp.X_add_number;
2188 
2189   *str = ptr;
2190 
2191   return TRUE;
2192 }
2193 
2194 /* Record in *RELOC that operand *OPERAND needs a GAS internal fixup.
2195    If NEED_LIBOPCODES_P is non-zero, the fixup will need
2196    assistance from libopcodes.  */
2197 
2198 static inline void
2199 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2200 				const aarch64_opnd_info *operand,
2201 				int need_libopcodes_p)
2202 {
2203   reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2204   reloc->opnd = operand->type;
2205   if (need_libopcodes_p)
2206     reloc->need_libopcodes_p = 1;
2207 }
2208 
2209 /* Return TRUE if the instruction needs to be fixed up later internally by
2210    GAS; otherwise return FALSE.  */
2211 
2212 static inline bfd_boolean
2213 aarch64_gas_internal_fixup_p (void)
2214 {
2215   return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2216 }
2217 
2218 /* Assign the immediate value to the relevant field in *OPERAND if
2219    RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2220    needs an internal fixup in a later stage.
2221    ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2222    IMM.VALUE that may get assigned with the constant.  */
2223 static inline void
2224 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2225 				    aarch64_opnd_info *operand,
2226 				    int addr_off_p,
2227 				    int need_libopcodes_p,
2228 				    int skip_p)
2229 {
2230   if (reloc->exp.X_op == O_constant)
2231     {
2232       if (addr_off_p)
2233 	operand->addr.offset.imm = reloc->exp.X_add_number;
2234       else
2235 	operand->imm.value = reloc->exp.X_add_number;
2236       reloc->type = BFD_RELOC_UNUSED;
2237     }
2238   else
2239     {
2240       aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2241       /* Tell libopcodes to ignore this operand or not.  This is helpful
2242 	 when one of the operands needs to be fixed up later but we need
2243 	 libopcodes to check the other operands.  */
2244       operand->skip = skip_p;
2245     }
2246 }
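/* For illustration: if the parsed expression is the constant 16 and
   ADDR_OFF_P is zero, the value 16 is stored straight into
   operand->imm.value and the relocation type is reset to
   BFD_RELOC_UNUSED; if the expression instead refers to a symbol, the
   operand is flagged for a GAS internal fixup to be resolved later.  */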
2247 
2248 /* Relocation modifiers.  Each entry in the table contains the textual
2249    name for the relocation which may be placed before a symbol used as
2250    a load/store offset or an add immediate.  It must be surrounded by
2251    leading and trailing colons, for example:
2252 
2253 	ldr	x0, [x1, #:rello:varsym]
2254 	add	x0, x1, #:rello:varsym  */
2255 
2256 struct reloc_table_entry
2257 {
2258   const char *name;
2259   int pc_rel;
2260   bfd_reloc_code_real_type adrp_type;
2261   bfd_reloc_code_real_type movw_type;
2262   bfd_reloc_code_real_type add_type;
2263   bfd_reloc_code_real_type ldst_type;
2264 };
2265 
2266 static struct reloc_table_entry reloc_table[] = {
2267   /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2268   {"lo12", 0,
2269    0,
2270    0,
2271    BFD_RELOC_AARCH64_ADD_LO12,
2272    BFD_RELOC_AARCH64_LDST_LO12},
2273 
2274   /* Higher 21 bits of pc-relative page offset: ADRP */
2275   {"pg_hi21", 1,
2276    BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2277    0,
2278    0,
2279    0},
2280 
2281   /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2282   {"pg_hi21_nc", 1,
2283    BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2284    0,
2285    0,
2286    0},
2287 
2288   /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2289   {"abs_g0", 0,
2290    0,
2291    BFD_RELOC_AARCH64_MOVW_G0,
2292    0,
2293    0},
2294 
2295   /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2296   {"abs_g0_s", 0,
2297    0,
2298    BFD_RELOC_AARCH64_MOVW_G0_S,
2299    0,
2300    0},
2301 
2302   /* Less significant bits 0-15 of address/value: MOVK, no check */
2303   {"abs_g0_nc", 0,
2304    0,
2305    BFD_RELOC_AARCH64_MOVW_G0_NC,
2306    0,
2307    0},
2308 
2309   /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2310   {"abs_g1", 0,
2311    0,
2312    BFD_RELOC_AARCH64_MOVW_G1,
2313    0,
2314    0},
2315 
2316   /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2317   {"abs_g1_s", 0,
2318    0,
2319    BFD_RELOC_AARCH64_MOVW_G1_S,
2320    0,
2321    0},
2322 
2323   /* Less significant bits 16-31 of address/value: MOVK, no check */
2324   {"abs_g1_nc", 0,
2325    0,
2326    BFD_RELOC_AARCH64_MOVW_G1_NC,
2327    0,
2328    0},
2329 
2330   /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2331   {"abs_g2", 0,
2332    0,
2333    BFD_RELOC_AARCH64_MOVW_G2,
2334    0,
2335    0},
2336 
2337   /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2338   {"abs_g2_s", 0,
2339    0,
2340    BFD_RELOC_AARCH64_MOVW_G2_S,
2341    0,
2342    0},
2343 
2344   /* Less significant bits 32-47 of address/value: MOVK, no check */
2345   {"abs_g2_nc", 0,
2346    0,
2347    BFD_RELOC_AARCH64_MOVW_G2_NC,
2348    0,
2349    0},
2350 
2351   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2352   {"abs_g3", 0,
2353    0,
2354    BFD_RELOC_AARCH64_MOVW_G3,
2355    0,
2356    0},
2357   /* Get to the page containing GOT entry for a symbol.  */
2358   {"got", 1,
2359    BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2360    0,
2361    0,
2362    0},
2363   /* 12 bit offset into the page containing GOT entry for that symbol.  */
2364   {"got_lo12", 0,
2365    0,
2366    0,
2367    0,
2368    BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2369 
2370   /* Get to the page containing GOT TLS entry for a symbol */
2371   {"tlsgd", 0,
2372    BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2373    0,
2374    0,
2375    0},
2376 
2377   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2378   {"tlsgd_lo12", 0,
2379    0,
2380    0,
2381    BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2382    0},
2383 
2384   /* Get to the page containing GOT TLS entry for a symbol */
2385   {"tlsdesc", 0,
2386    BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
2387    0,
2388    0,
2389    0},
2390 
2391   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2392   {"tlsdesc_lo12", 0,
2393    0,
2394    0,
2395    BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2396    BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2397 
2398   /* Get to the page containing GOT TLS entry for a symbol */
2399   {"gottprel", 0,
2400    BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2401    0,
2402    0,
2403    0},
2404 
2405   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2406   {"gottprel_lo12", 0,
2407    0,
2408    0,
2409    0,
2410    BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2411 
2412   /* Get tp offset for a symbol.  */
2413   {"tprel", 0,
2414    0,
2415    0,
2416    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2417    0},
2418 
2419   /* Get tp offset for a symbol.  */
2420   {"tprel_lo12", 0,
2421    0,
2422    0,
2423    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2424    0},
2425 
2426   /* Get tp offset for a symbol.  */
2427   {"tprel_hi12", 0,
2428    0,
2429    0,
2430    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2431    0},
2432 
2433   /* Get tp offset for a symbol.  */
2434   {"tprel_lo12_nc", 0,
2435    0,
2436    0,
2437    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2438    0},
2439 
2440   /* Most significant bits 32-47 of address/value: MOVZ.  */
2441   {"tprel_g2", 0,
2442    0,
2443    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2444    0,
2445    0},
2446 
2447   /* Most significant bits 16-31 of address/value: MOVZ.  */
2448   {"tprel_g1", 0,
2449    0,
2450    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2451    0,
2452    0},
2453 
2454   /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
2455   {"tprel_g1_nc", 0,
2456    0,
2457    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2458    0,
2459    0},
2460 
2461   /* Most significant bits 0-15 of address/value: MOVZ.  */
2462   {"tprel_g0", 0,
2463    0,
2464    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2465    0,
2466    0},
2467 
2468   /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
2469   {"tprel_g0_nc", 0,
2470    0,
2471    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2472    0,
2473    0},
2474 };
2475 
2476 /* Given the address of a pointer pointing to the textual name of a
2477    relocation as may appear in assembler source, attempt to find its
2478    details in reloc_table.  The pointer will be updated to the character
2479    after the trailing colon.  On failure, NULL will be returned;
2480    otherwise return the reloc_table_entry.  */
2481 
2482 static struct reloc_table_entry *
2483 find_reloc_table_entry (char **str)
2484 {
2485   unsigned int i;
2486   for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2487     {
2488       int length = strlen (reloc_table[i].name);
2489 
2490       if (strncasecmp (reloc_table[i].name, *str, length) == 0
2491 	  && (*str)[length] == ':')
2492 	{
2493 	  *str += (length + 1);
2494 	  return &reloc_table[i];
2495 	}
2496     }
2497 
2498   return NULL;
2499 }
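/* Illustrative usage sketch (not built): given a pointer to the text
   "lo12:var", the lookup consumes "lo12:" and returns the "lo12" entry.  */
#if 0
static void
example_find_reloc_table_entry (void)
{
  char buf[] = "lo12:var";
  char *p = buf;
  struct reloc_table_entry *entry = find_reloc_table_entry (&p);
  /* Now entry->add_type is BFD_RELOC_AARCH64_ADD_LO12, entry->ldst_type
     is BFD_RELOC_AARCH64_LDST_LO12, and p points at "var".  */
}
#endif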
2500 
2501 /* Mode argument to parse_shift and parser_shifter_operand.  */
2502 enum parse_shift_mode
2503 {
2504   SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2505 				   "#imm{,lsl #n}"  */
2506   SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
2507 				   "#imm"  */
2508   SHIFTED_LSL,			/* bare "lsl #n"  */
2509   SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
2510   SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
2511 };
2512 
2513 /* Parse a <shift> operator on an AArch64 data processing instruction.
2514    Return TRUE on success; otherwise return FALSE.  */
2515 static bfd_boolean
2516 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2517 {
2518   const struct aarch64_name_value_pair *shift_op;
2519   enum aarch64_modifier_kind kind;
2520   expressionS exp;
2521   int exp_has_prefix;
2522   char *s = *str;
2523   char *p = s;
2524 
2525   for (p = *str; ISALPHA (*p); p++)
2526     ;
2527 
2528   if (p == *str)
2529     {
2530       set_syntax_error (_("shift expression expected"));
2531       return FALSE;
2532     }
2533 
2534   shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2535 
2536   if (shift_op == NULL)
2537     {
2538       set_syntax_error (_("shift operator expected"));
2539       return FALSE;
2540     }
2541 
2542   kind = aarch64_get_operand_modifier (shift_op);
2543 
2544   if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2545     {
2546       set_syntax_error (_("invalid use of 'MSL'"));
2547       return FALSE;
2548     }
2549 
2550   switch (mode)
2551     {
2552     case SHIFTED_LOGIC_IMM:
2553       if (aarch64_extend_operator_p (kind))
2554 	{
2555 	  set_syntax_error (_("extending shift is not permitted"));
2556 	  return FALSE;
2557 	}
2558       break;
2559 
2560     case SHIFTED_ARITH_IMM:
2561       if (kind == AARCH64_MOD_ROR)
2562 	{
2563 	  set_syntax_error (_("'ROR' shift is not permitted"));
2564 	  return FALSE;
2565 	}
2566       break;
2567 
2568     case SHIFTED_LSL:
2569       if (kind != AARCH64_MOD_LSL)
2570 	{
2571 	  set_syntax_error (_("only 'LSL' shift is permitted"));
2572 	  return FALSE;
2573 	}
2574       break;
2575 
2576     case SHIFTED_REG_OFFSET:
2577       if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2578 	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2579 	{
2580 	  set_fatal_syntax_error
2581 	    (_("invalid shift for the register offset addressing mode"));
2582 	  return FALSE;
2583 	}
2584       break;
2585 
2586     case SHIFTED_LSL_MSL:
2587       if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2588 	{
2589 	  set_syntax_error (_("invalid shift operator"));
2590 	  return FALSE;
2591 	}
2592       break;
2593 
2594     default:
2595       abort ();
2596     }
2597 
2598   /* Whitespace can appear here if the next thing is a bare digit.  */
2599   skip_whitespace (p);
2600 
2601   /* Parse shift amount.  */
2602   exp_has_prefix = 0;
2603   if (mode == SHIFTED_REG_OFFSET && *p == ']')
2604     exp.X_op = O_absent;
2605   else
2606     {
2607       if (is_immediate_prefix (*p))
2608 	{
2609 	  p++;
2610 	  exp_has_prefix = 1;
2611 	}
2612       my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2613     }
2614   if (exp.X_op == O_absent)
2615     {
2616       if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
2617 	{
2618 	  set_syntax_error (_("missing shift amount"));
2619 	  return FALSE;
2620 	}
2621       operand->shifter.amount = 0;
2622     }
2623   else if (exp.X_op != O_constant)
2624     {
2625       set_syntax_error (_("constant shift amount required"));
2626       return FALSE;
2627     }
2628   else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2629     {
2630       set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2631       return FALSE;
2632     }
2633   else
2634     {
2635       operand->shifter.amount = exp.X_add_number;
2636       operand->shifter.amount_present = 1;
2637     }
2638 
2639   operand->shifter.operator_present = 1;
2640   operand->shifter.kind = kind;
2641 
2642   *str = p;
2643   return TRUE;
2644 }
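/* For illustration (a sketch of the resulting operand fields): parsing
   "lsl #3" in SHIFTED_REG_OFFSET mode sets shifter.kind to
   AARCH64_MOD_LSL, shifter.amount to 3 and shifter.amount_present to 1;
   parsing a bare "sxtw" in the same mode sets shifter.kind to
   AARCH64_MOD_SXTW with an implied amount of 0 and amount_present left
   clear.  */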
2645 
2646 /* Parse a <shifter_operand> for a data processing instruction:
2647 
2648       #<immediate>
2649       #<immediate>, LSL #imm
2650 
2651    Validation of immediate operands is deferred to md_apply_fix.
2652 
2653    Return TRUE on success; otherwise return FALSE.  */
2654 
2655 static bfd_boolean
2656 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2657 			   enum parse_shift_mode mode)
2658 {
2659   char *p;
2660 
2661   if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2662     return FALSE;
2663 
2664   p = *str;
2665 
2666   /* Accept an immediate expression.  */
2667   if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2668     return FALSE;
2669 
2670   /* Accept optional LSL for arithmetic immediate values.  */
2671   if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2672     if (! parse_shift (&p, operand, SHIFTED_LSL))
2673       return FALSE;
2674 
2675   /* Do not accept any shifter for logical immediate values.  */
2676   if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2677       && parse_shift (&p, operand, mode))
2678     {
2679       set_syntax_error (_("unexpected shift operator"));
2680       return FALSE;
2681     }
2682 
2683   *str = p;
2684   return TRUE;
2685 }
2686 
2687 /* Parse a <shifter_operand> for a data processing instruction:
2688 
2689       <Rm>
2690       <Rm>, <shift>
2691       #<immediate>
2692       #<immediate>, LSL #imm
2693 
2694    where <shift> is handled by parse_shift above, and the last two
2695    cases are handled by the function above.
2696 
2697    Validation of immediate operands is deferred to md_apply_fix.
2698 
2699    Return TRUE on success; otherwise return FALSE.  */
2700 
2701 static bfd_boolean
2702 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2703 		       enum parse_shift_mode mode)
2704 {
2705   int reg;
2706   int isreg32, isregzero;
2707   enum aarch64_operand_class opd_class
2708     = aarch64_get_operand_class (operand->type);
2709 
2710   if ((reg =
2711        aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2712     {
2713       if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2714 	{
2715 	  set_syntax_error (_("unexpected register in the immediate operand"));
2716 	  return FALSE;
2717 	}
2718 
2719       if (!isregzero && reg == REG_SP)
2720 	{
2721 	  set_syntax_error (BAD_SP);
2722 	  return FALSE;
2723 	}
2724 
2725       operand->reg.regno = reg;
2726       operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2727 
2728       /* Accept optional shift operation on register.  */
2729       if (! skip_past_comma (str))
2730 	return TRUE;
2731 
2732       if (! parse_shift (str, operand, mode))
2733 	return FALSE;
2734 
2735       return TRUE;
2736     }
2737   else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2738     {
2739       set_syntax_error
2740 	(_("integer register expected in the extended/shifted operand "
2741 	   "register"));
2742       return FALSE;
2743     }
2744 
2745   /* We have an immediate (possibly shifted) operand.  */
2746   return parse_shifter_operand_imm (str, operand, mode);
2747 }
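/* For illustration, both operand forms described above can appear in
   assembly such as (sketch):

     add  x0, x1, x2, lsl #2     // register with optional shift
     add  x0, x1, #255, lsl #12  // immediate with optional LSL

   In the register case the register number and W/X qualifier are
   recorded here; in the immediate case the expression is left in
   inst.reloc.exp for md_apply_fix to validate.  */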
2748 
2749 /* Return TRUE on success; return FALSE otherwise.  */
2750 
2751 static bfd_boolean
2752 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2753 			     enum parse_shift_mode mode)
2754 {
2755   char *p = *str;
2756 
2757   /* Determine if we have the sequence of characters #: or just :
2758      coming next.  If we do, then we check for a :rello: relocation
2759      modifier.  If we don't, punt the whole lot to
2760      parse_shifter_operand.  */
2761 
2762   if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2763     {
2764       struct reloc_table_entry *entry;
2765 
2766       if (p[0] == '#')
2767 	p += 2;
2768       else
2769 	p++;
2770       *str = p;
2771 
2772       /* Try to parse a relocation.  Anything else is an error.  */
2773       if (!(entry = find_reloc_table_entry (str)))
2774 	{
2775 	  set_syntax_error (_("unknown relocation modifier"));
2776 	  return FALSE;
2777 	}
2778 
2779       if (entry->add_type == 0)
2780 	{
2781 	  set_syntax_error
2782 	    (_("this relocation modifier is not allowed on this instruction"));
2783 	  return FALSE;
2784 	}
2785 
2786       /* Save str before we decompose it.  */
2787       p = *str;
2788 
2789       /* Next, we parse the expression.  */
2790       if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2791 	return FALSE;
2792 
2793       /* Record the relocation type (use the ADD variant here).  */
2794       inst.reloc.type = entry->add_type;
2795       inst.reloc.pc_rel = entry->pc_rel;
2796 
2797       /* If str is empty, we've reached the end, stop here.  */
2798       if (**str == '\0')
2799 	return TRUE;
2800 
2801       /* Otherwise, we have a shifted reloc modifier, so rewind to
2802          recover the variable name and continue parsing for the shifter.  */
2803       *str = p;
2804       return parse_shifter_operand_imm (str, operand, mode);
2805     }
2806 
2807   return parse_shifter_operand (str, operand, mode);
2808 }
2809 
2810 /* Parse all forms of an address expression.  Information is written
2811    to *OPERAND and/or inst.reloc.
2812 
2813    The A64 instruction set has the following addressing modes:
2814 
2815    Offset
2816      [base]			// in SIMD ld/st structure
2817      [base{,#0}]		// in ld/st exclusive
2818      [base{,#imm}]
2819      [base,Xm{,LSL #imm}]
2820      [base,Xm,SXTX {#imm}]
2821      [base,Wm,(S|U)XTW {#imm}]
2822    Pre-indexed
2823      [base,#imm]!
2824    Post-indexed
2825      [base],#imm
2826      [base],Xm			// in SIMD ld/st structure
2827    PC-relative (literal)
2828      label
2829      =immediate
2830 
2831    (As a convenience, the notation "=immediate" is permitted in conjunction
2832    with the pc-relative literal load instructions to automatically place an
2833    immediate value or symbolic address in a nearby literal pool and generate
2834    a hidden label which references it.)
2835 
2836    Upon a successful parsing, the address structure in *OPERAND will be
2837    filled in the following way:
2838 
2839      .base_regno = <base>
2840      .offset.is_reg	// 1 if the offset is a register
2841      .offset.imm = <imm>
2842      .offset.regno = <Rm>
2843 
2844    For different addressing modes defined in the A64 ISA:
2845 
2846    Offset
2847      .pcrel=0; .preind=1; .postind=0; .writeback=0
2848    Pre-indexed
2849      .pcrel=0; .preind=1; .postind=0; .writeback=1
2850    Post-indexed
2851      .pcrel=0; .preind=0; .postind=1; .writeback=1
2852    PC-relative (literal)
2853      .pcrel=1; .preind=1; .postind=0; .writeback=0
2854 
2855    The shift/extension information, if any, will be stored in .shifter.
2856 
2857    It is the caller's responsibility to check for addressing modes not
2858    supported by the instruction, and to set inst.reloc.type.  */
2859 
2860 static bfd_boolean
2861 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2862 		    int accept_reg_post_index)
2863 {
2864   char *p = *str;
2865   int reg;
2866   int isreg32, isregzero;
2867   expressionS *exp = &inst.reloc.exp;
2868 
2869   if (! skip_past_char (&p, '['))
2870     {
2871       /* =immediate or label.  */
2872       operand->addr.pcrel = 1;
2873       operand->addr.preind = 1;
2874 
2875       if (skip_past_char (&p, '='))
2876 	/* =immediate; need to generate the literal in the literal pool.  */
2877 	inst.gen_lit_pool = 1;
2878 
2879       if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2880 	{
2881 	  set_syntax_error (_("invalid address"));
2882 	  return FALSE;
2883 	}
2884 
2885       *str = p;
2886       return TRUE;
2887     }
2888 
2889   /* [ */
2890 
2891   /* Accept SP and reject ZR */
2892   reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2893   if (reg == PARSE_FAIL || isreg32)
2894     {
2895       set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2896       return FALSE;
2897     }
2898   operand->addr.base_regno = reg;
2899 
2900   /* [Xn */
2901   if (skip_past_comma (&p))
2902     {
2903       /* [Xn, */
2904       operand->addr.preind = 1;
2905 
2906       /* Reject SP and accept ZR */
2907       reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2908       if (reg != PARSE_FAIL)
2909 	{
2910 	  /* [Xn,Rm  */
2911 	  operand->addr.offset.regno = reg;
2912 	  operand->addr.offset.is_reg = 1;
2913 	  /* Shifted index.  */
2914 	  if (skip_past_comma (&p))
2915 	    {
2916 	      /* [Xn,Rm,  */
2917 	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2918 		/* Use the diagnostics set in parse_shift, so do not set a
2919 		   new error message here.  */
2920 		return FALSE;
2921 	    }
2922 	  /* We only accept:
2923 	     [base,Xm{,LSL #imm}]
2924 	     [base,Xm,SXTX {#imm}]
2925 	     [base,Wm,(S|U)XTW {#imm}]  */
2926 	  if (operand->shifter.kind == AARCH64_MOD_NONE
2927 	      || operand->shifter.kind == AARCH64_MOD_LSL
2928 	      || operand->shifter.kind == AARCH64_MOD_SXTX)
2929 	    {
2930 	      if (isreg32)
2931 		{
2932 		  set_syntax_error (_("invalid use of 32-bit register offset"));
2933 		  return FALSE;
2934 		}
2935 	    }
2936 	  else if (!isreg32)
2937 	    {
2938 	      set_syntax_error (_("invalid use of 64-bit register offset"));
2939 	      return FALSE;
2940 	    }
2941 	}
2942       else
2943 	{
2944 	  /* [Xn,#:<reloc_op>:<symbol>  */
2945 	  skip_past_char (&p, '#');
2946 	  if (reloc && skip_past_char (&p, ':'))
2947 	    {
2948 	      struct reloc_table_entry *entry;
2949 
2950 	      /* Try to parse a relocation modifier.  Anything else is
2951 		 an error.  */
2952 	      if (!(entry = find_reloc_table_entry (&p)))
2953 		{
2954 		  set_syntax_error (_("unknown relocation modifier"));
2955 		  return FALSE;
2956 		}
2957 
2958 	      if (entry->ldst_type == 0)
2959 		{
2960 		  set_syntax_error
2961 		    (_("this relocation modifier is not allowed on this "
2962 		       "instruction"));
2963 		  return FALSE;
2964 		}
2965 
2966 	      /* [Xn,#:<reloc_op>:  */
2967 	      /* We now have the group relocation table entry corresponding to
2968 	         the name in the assembler source.  Next, we parse the
2969 	         expression.  */
2970 	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2971 		{
2972 		  set_syntax_error (_("invalid relocation expression"));
2973 		  return FALSE;
2974 		}
2975 
2976 	      /* [Xn,#:<reloc_op>:<expr>  */
2977 	      /* Record the load/store relocation type.  */
2978 	      inst.reloc.type = entry->ldst_type;
2979 	      inst.reloc.pc_rel = entry->pc_rel;
2980 	    }
2981 	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2982 	    {
2983 	      set_syntax_error (_("invalid expression in the address"));
2984 	      return FALSE;
2985 	    }
2986 	  /* [Xn,<expr>  */
2987 	}
2988     }
2989 
2990   if (! skip_past_char (&p, ']'))
2991     {
2992       set_syntax_error (_("']' expected"));
2993       return FALSE;
2994     }
2995 
2996   if (skip_past_char (&p, '!'))
2997     {
2998       if (operand->addr.preind && operand->addr.offset.is_reg)
2999 	{
3000 	  set_syntax_error (_("register offset not allowed in pre-indexed "
3001 			      "addressing mode"));
3002 	  return FALSE;
3003 	}
3004       /* [Xn]! */
3005       operand->addr.writeback = 1;
3006     }
3007   else if (skip_past_comma (&p))
3008     {
3009       /* [Xn], */
3010       operand->addr.postind = 1;
3011       operand->addr.writeback = 1;
3012 
3013       if (operand->addr.preind)
3014 	{
3015 	  set_syntax_error (_("cannot combine pre- and post-indexing"));
3016 	  return FALSE;
3017 	}
3018 
3019       if (accept_reg_post_index
3020 	  && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3021 					     &isregzero)) != PARSE_FAIL)
3022 	{
3023 	  /* [Xn],Xm */
3024 	  if (isreg32)
3025 	    {
3026 	      set_syntax_error (_("invalid 32-bit register offset"));
3027 	      return FALSE;
3028 	    }
3029 	  operand->addr.offset.regno = reg;
3030 	  operand->addr.offset.is_reg = 1;
3031 	}
3032       else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3033 	{
3034 	  /* [Xn],#expr */
3035 	  set_syntax_error (_("invalid expression in the address"));
3036 	  return FALSE;
3037 	}
3038     }
3039 
3040   /* If at this point neither .preind nor .postind is set, we have a
3041      bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
3042   if (operand->addr.preind == 0 && operand->addr.postind == 0)
3043     {
3044       if (operand->addr.writeback)
3045 	{
3046 	  /* Reject [Rn]!   */
3047 	  set_syntax_error (_("missing offset in the pre-indexed address"));
3048 	  return FALSE;
3049 	}
3050       operand->addr.preind = 1;
3051       inst.reloc.exp.X_op = O_constant;
3052       inst.reloc.exp.X_add_number = 0;
3053     }
3054 
3055   *str = p;
3056   return TRUE;
3057 }
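/* For illustration, some addresses and the fields they produce (sketch):

     [x1]               .preind=1, with an implied constant offset of 0
     [x1, #8]!          .preind=1, .writeback=1, offset expression 8
     [x1], #8           .postind=1, .writeback=1, offset expression 8
     [x1, x2, lsl #3]   .preind=1, .offset.is_reg=1, LSL #3 in .shifter
     label              .pcrel=1, .preind=1 (pc-relative literal)  */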
3058 
3059 /* Return TRUE on success; otherwise return FALSE.  */
3060 static bfd_boolean
3061 parse_address (char **str, aarch64_opnd_info *operand,
3062 	       int accept_reg_post_index)
3063 {
3064   return parse_address_main (str, operand, 0, accept_reg_post_index);
3065 }
3066 
3067 /* Return TRUE on success; otherwise return FALSE.  */
3068 static bfd_boolean
3069 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3070 {
3071   return parse_address_main (str, operand, 1, 0);
3072 }
3073 
3074 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3075    Return TRUE on success; otherwise return FALSE.  */
3076 static bfd_boolean
3077 parse_half (char **str, int *internal_fixup_p)
3078 {
3079   char *p, *saved;
3080   int dummy;
3081 
3082   p = *str;
3083   skip_past_char (&p, '#');
3084 
3085   gas_assert (internal_fixup_p);
3086   *internal_fixup_p = 0;
3087 
3088   if (*p == ':')
3089     {
3090       struct reloc_table_entry *entry;
3091 
3092       /* Try to parse a relocation.  Anything else is an error.  */
3093       ++p;
3094       if (!(entry = find_reloc_table_entry (&p)))
3095 	{
3096 	  set_syntax_error (_("unknown relocation modifier"));
3097 	  return FALSE;
3098 	}
3099 
3100       if (entry->movw_type == 0)
3101 	{
3102 	  set_syntax_error
3103 	    (_("this relocation modifier is not allowed on this instruction"));
3104 	  return FALSE;
3105 	}
3106 
3107       inst.reloc.type = entry->movw_type;
3108     }
3109   else
3110     *internal_fixup_p = 1;
3111 
3112   /* Avoid parsing a register as a general symbol.  */
3113   saved = p;
3114   if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3115     return FALSE;
3116   p = saved;
3117 
3118   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3119     return FALSE;
3120 
3121   *str = p;
3122   return TRUE;
3123 }
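/* For illustration (sketch): in

     movz  x0, #:abs_g1:sym

   the ":abs_g1:" modifier selects the table entry's movw_type,
   BFD_RELOC_AARCH64_MOVW_G1, whereas a plain "movz x0, #1234" leaves the
   relocation unset and sets *INTERNAL_FIXUP_P instead.  */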
3124 
3125 /* Parse an operand for an ADRP instruction:
3126      ADRP <Xd>, <label>
3127    Return TRUE on success; otherwise return FALSE.  */
3128 
3129 static bfd_boolean
3130 parse_adrp (char **str)
3131 {
3132   char *p;
3133 
3134   p = *str;
3135   if (*p == ':')
3136     {
3137       struct reloc_table_entry *entry;
3138 
3139       /* Try to parse a relocation.  Anything else is an error.  */
3140       ++p;
3141       if (!(entry = find_reloc_table_entry (&p)))
3142 	{
3143 	  set_syntax_error (_("unknown relocation modifier"));
3144 	  return FALSE;
3145 	}
3146 
3147       if (entry->adrp_type == 0)
3148 	{
3149 	  set_syntax_error
3150 	    (_("this relocation modifier is not allowed on this instruction"));
3151 	  return FALSE;
3152 	}
3153 
3154       inst.reloc.type = entry->adrp_type;
3155     }
3156   else
3157     inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3158 
3159   inst.reloc.pc_rel = 1;
3160 
3161   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3162     return FALSE;
3163 
3164   *str = p;
3165   return TRUE;
3166 }
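/* For illustration (sketch): "adrp x0, :got:sym" picks up the entry's
   adrp_type, BFD_RELOC_AARCH64_ADR_GOT_PAGE, while a plain
   "adrp x0, sym" falls back to BFD_RELOC_AARCH64_ADR_HI21_PCREL; in both
   cases the relocation is marked pc-relative.  */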
3167 
3168 /* Miscellaneous. */
3169 
3170 /* Parse an option for a preload instruction.  Returns the encoding for the
3171    option, or PARSE_FAIL.  */
3172 
3173 static int
3174 parse_pldop (char **str)
3175 {
3176   char *p, *q;
3177   const struct aarch64_name_value_pair *o;
3178 
3179   p = q = *str;
3180   while (ISALNUM (*q))
3181     q++;
3182 
3183   o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3184   if (!o)
3185     return PARSE_FAIL;
3186 
3187   *str = q;
3188   return o->value;
3189 }
3190 
3191 /* Parse an option for a barrier instruction.  Returns the encoding for the
3192    option, or PARSE_FAIL.  */
3193 
3194 static int
3195 parse_barrier (char **str)
3196 {
3197   char *p, *q;
3198   const asm_barrier_opt *o;
3199 
3200   p = q = *str;
3201   while (ISALPHA (*q))
3202     q++;
3203 
3204   o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3205   if (!o)
3206     return PARSE_FAIL;
3207 
3208   *str = q;
3209   return o->value;
3210 }
3211 
3212 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3213    Returns the encoding for the option, or PARSE_FAIL.
3214 
3215    If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3216    implementation-defined system register name S3_<op1>_<Cn>_<Cm>_<op2>.  */
3217 
3218 static int
3219 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3220 {
3221   char *p, *q;
3222   char buf[32];
3223   const struct aarch64_name_value_pair *o;
3224   int value;
3225 
3226   p = buf;
3227   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3228     if (p < buf + 31)
3229       *p++ = TOLOWER (*q);
3230   *p = '\0';
3231   /* Assert that BUF is large enough.  */
3232   gas_assert (p - buf == q - *str);
3233 
3234   o = hash_find (sys_regs, buf);
3235   if (!o)
3236     {
3237       if (!imple_defined_p)
3238 	return PARSE_FAIL;
3239       else
3240 	{
3241 	  /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation-defined
3242 	     registers.  */
3243 	  unsigned int op0, op1, cn, cm, op2;
3244 	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3245 	    return PARSE_FAIL;
3246 	  /* Register access is encoded as follows:
3247 	     op0  op1  CRn   CRm   op2
3248 	     11   xxx  1x11  xxxx  xxx.  */
3249 	  if (op0 != 3 || op1 > 7 || (cn | 0x4) != 0xf || cm > 15 || op2 > 7)
3250 	    return PARSE_FAIL;
3251 	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3252 	}
3253     }
3254   else
3255     value = o->value;
3256 
3257   *str = q;
3258   return value;
3259 }
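/* Worked example (illustrative): the implementation-defined name
   "s3_0_c11_c0_1" scans as op0=3, op1=0, CRn=11, CRm=0, op2=1, which
   satisfies the constraints above (11 | 0x4 == 0xf) and encodes to
   (3 << 14) | (0 << 11) | (11 << 7) | (0 << 3) | 1 == 0xc581.  */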
3260 
3261 /* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
3262    for the option, or NULL.  */
3263 
3264 static const aarch64_sys_ins_reg *
3265 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3266 {
3267   char *p, *q;
3268   char buf[32];
3269   const aarch64_sys_ins_reg *o;
3270 
3271   p = buf;
3272   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3273     if (p < buf + 31)
3274       *p++ = TOLOWER (*q);
3275   *p = '\0';
3276 
3277   o = hash_find (sys_ins_regs, buf);
3278   if (!o)
3279     return NULL;
3280 
3281   *str = q;
3282   return o;
3283 }
3284 
3285 #define po_char_or_fail(chr) do {				\
3286     if (! skip_past_char (&str, chr))				\
3287       goto failure;						\
3288 } while (0)
3289 
3290 #define po_reg_or_fail(regtype) do {				\
3291     val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
3292     if (val == PARSE_FAIL)					\
3293       {								\
3294 	set_default_error ();					\
3295 	goto failure;						\
3296       }								\
3297   } while (0)
3298 
3299 #define po_int_reg_or_fail(reject_sp, reject_rz) do {		\
3300     val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz,	\
3301                                    &isreg32, &isregzero);	\
3302     if (val == PARSE_FAIL)					\
3303       {								\
3304 	set_default_error ();					\
3305 	goto failure;						\
3306       }								\
3307     info->reg.regno = val;					\
3308     if (isreg32)						\
3309       info->qualifier = AARCH64_OPND_QLF_W;			\
3310     else							\
3311       info->qualifier = AARCH64_OPND_QLF_X;			\
3312   } while (0)
3313 
3314 #define po_imm_nc_or_fail() do {				\
3315     if (! parse_constant_immediate (&str, &val))		\
3316       goto failure;						\
3317   } while (0)
3318 
3319 #define po_imm_or_fail(min, max) do {				\
3320     if (! parse_constant_immediate (&str, &val))		\
3321       goto failure;						\
3322     if (val < min || val > max)					\
3323       {								\
3324 	set_fatal_syntax_error (_("immediate value out of range "\
3325 #min " to "#max));						\
3326 	goto failure;						\
3327       }								\
3328   } while (0)
3329 
3330 #define po_misc_or_fail(expr) do {				\
3331     if (!expr)							\
3332       goto failure;						\
3333   } while (0)
3334 
3335 /* Encode the 12-bit imm field of Add/sub immediate.  */
3336 static inline uint32_t
3337 encode_addsub_imm (uint32_t imm)
3338 {
3339   return imm << 10;
3340 }
3341 
3342 /* Encode the shift amount field of Add/sub immediate.  */
3343 static inline uint32_t
3344 encode_addsub_imm_shift_amount (uint32_t cnt)
3345 {
3346   return cnt << 22;
3347 }
3348 
3349 
3350 /* Encode the imm field of the ADR instruction.  */
3351 static inline uint32_t
3352 encode_adr_imm (uint32_t imm)
3353 {
3354   return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
3355 	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
3356 }
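/* Worked example (illustrative): for the 21-bit ADR immediate 0x12345,
   the low two bits (01) land in insn bits [30:29] and the remaining bits
   [20:2] land in [23:5], so the function above returns
   0x20000000 | 0x00091a20 == 0x20091a20.  */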
3357 
3358 /* Encode the immediate field of Move wide immediate.  */
3359 static inline uint32_t
3360 encode_movw_imm (uint32_t imm)
3361 {
3362   return imm << 5;
3363 }
3364 
3365 /* Encode the 26-bit offset of unconditional branch.  */
3366 static inline uint32_t
3367 encode_branch_ofs_26 (uint32_t ofs)
3368 {
3369   return ofs & ((1 << 26) - 1);
3370 }
3371 
3372 /* Encode the 19-bit offset of conditional branch and compare & branch.  */
3373 static inline uint32_t
3374 encode_cond_branch_ofs_19 (uint32_t ofs)
3375 {
3376   return (ofs & ((1 << 19) - 1)) << 5;
3377 }
3378 
3379 /* Encode the 19-bit offset of ld literal.  */
3380 static inline uint32_t
3381 encode_ld_lit_ofs_19 (uint32_t ofs)
3382 {
3383   return (ofs & ((1 << 19) - 1)) << 5;
3384 }
3385 
3386 /* Encode the 14-bit offset of test & branch.  */
3387 static inline uint32_t
3388 encode_tst_branch_ofs_14 (uint32_t ofs)
3389 {
3390   return (ofs & ((1 << 14) - 1)) << 5;
3391 }
3392 
3393 /* Encode the 16-bit imm field of svc/hvc/smc.  */
3394 static inline uint32_t
3395 encode_svc_imm (uint32_t imm)
3396 {
3397   return imm << 5;
3398 }
3399 
3400 /* Reencode add(s) to sub(s), or sub(s) to add(s).  */
3401 static inline uint32_t
3402 reencode_addsub_switch_add_sub (uint32_t opcode)
3403 {
3404   return opcode ^ (1 << 30);
3405 }
3406 
3407 static inline uint32_t
3408 reencode_movzn_to_movz (uint32_t opcode)
3409 {
3410   return opcode | (1 << 30);
3411 }
3412 
3413 static inline uint32_t
3414 reencode_movzn_to_movn (uint32_t opcode)
3415 {
3416   return opcode & ~(1 << 30);
3417 }
3418 
3419 /* Overall per-instruction processing.	*/
3420 
3421 /* We need to be able to fix up arbitrary expressions in some statements.
3422    This is so that we can handle symbols that are an arbitrary distance from
3423    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3424    which returns part of an address in a form which will be valid for
3425    a data instruction.	We do this by pushing the expression into a symbol
3426    in the expr_section, and creating a fix for that.  */
3427 
3428 static fixS *
3429 fix_new_aarch64 (fragS * frag,
3430 		 int where,
3431 		 short int size, expressionS * exp, int pc_rel, int reloc)
3432 {
3433   fixS *new_fix;
3434 
3435   switch (exp->X_op)
3436     {
3437     case O_constant:
3438     case O_symbol:
3439     case O_add:
3440     case O_subtract:
3441       new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3442       break;
3443 
3444     default:
3445       new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3446 			 pc_rel, reloc);
3447       break;
3448     }
3449   return new_fix;
3450 }
3451 
3452 /* Diagnostics on operands errors.  */
3453 
3454 /* By default, output a one-line error message only.
3455    Verbose error messages can be enabled with -merror-verbose.  */
3456 static int verbose_error_p = 0;
3457 
3458 #ifdef DEBUG_AARCH64
3459 /* N.B. this is only for debugging purposes.  */
3460 const char* operand_mismatch_kind_names[] =
3461 {
3462   "AARCH64_OPDE_NIL",
3463   "AARCH64_OPDE_RECOVERABLE",
3464   "AARCH64_OPDE_SYNTAX_ERROR",
3465   "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3466   "AARCH64_OPDE_INVALID_VARIANT",
3467   "AARCH64_OPDE_OUT_OF_RANGE",
3468   "AARCH64_OPDE_UNALIGNED",
3469   "AARCH64_OPDE_REG_LIST",
3470   "AARCH64_OPDE_OTHER_ERROR",
3471 };
3472 #endif /* DEBUG_AARCH64 */
3473 
3474 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3475 
3476    When multiple errors of different kinds are found in the same assembly
3477    line, only the error of the highest severity will be picked up for
3478    issuing the diagnostics.  */
3479 
3480 static inline bfd_boolean
3481 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3482 				 enum aarch64_operand_error_kind rhs)
3483 {
3484   gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3485   gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3486   gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3487   gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3488   gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3489   gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3490   gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3491   gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3492   return lhs > rhs;
3493 }
3494 
3495 /* Helper routine to get the mnemonic name from the assembly instruction
3496    line; should only be called for diagnostic purposes, as a string copy
3497    operation is involved, which may affect runtime performance if used
3498    elsewhere.  */
3499 
3500 static const char*
3501 get_mnemonic_name (const char *str)
3502 {
3503   static char mnemonic[32];
3504   char *ptr;
3505 
3506   /* Get the first 31 bytes and assume that the full name is included.  */
3507   strncpy (mnemonic, str, 31);
3508   mnemonic[31] = '\0';
3509 
3510   /* Scan up to the end of the mnemonic, which must end in white space,
3511      '.', or end of string.  */
3512   for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3513     ;
3514 
3515   *ptr = '\0';
3516 
3517   /* Append '...' to the truncated long name.  */
3518   if (ptr - mnemonic == 31)
3519     mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3520 
3521   return mnemonic;
3522 }
3523 
3524 static void
3525 reset_aarch64_instruction (aarch64_instruction *instruction)
3526 {
3527   memset (instruction, '\0', sizeof (aarch64_instruction));
3528   instruction->reloc.type = BFD_RELOC_UNUSED;
3529 }
3530 
3531 /* Data structures storing one user error in the assembly code related to
3532    operands.  */
3533 
3534 struct operand_error_record
3535 {
3536   const aarch64_opcode *opcode;
3537   aarch64_operand_error detail;
3538   struct operand_error_record *next;
3539 };
3540 
3541 typedef struct operand_error_record operand_error_record;
3542 
3543 struct operand_errors
3544 {
3545   operand_error_record *head;
3546   operand_error_record *tail;
3547 };
3548 
3549 typedef struct operand_errors operand_errors;
3550 
3551 /* Top-level data structure reporting user errors for the current line of
3552    the assembly code.
3553    The way md_assemble works is that all opcodes sharing the same mnemonic
3554    name are iterated to find a match to the assembly line.  In this data
3555    structure, each of the such opcodes will have one operand_error_record
3556    allocated and inserted.  In other words, excessive errors related with
3557    a single opcode are disregarded.  */
3558 operand_errors operand_error_report;
3559 
3560 /* Free record nodes.  */
3561 static operand_error_record *free_opnd_error_record_nodes = NULL;
3562 
3563 /* Initialize the data structure that stores the operand mismatch
3564    information on assembling one line of the assembly code.  */
3565 static void
3566 init_operand_error_report (void)
3567 {
3568   if (operand_error_report.head != NULL)
3569     {
3570       gas_assert (operand_error_report.tail != NULL);
3571       operand_error_report.tail->next = free_opnd_error_record_nodes;
3572       free_opnd_error_record_nodes = operand_error_report.head;
3573       operand_error_report.head = NULL;
3574       operand_error_report.tail = NULL;
3575       return;
3576     }
3577   gas_assert (operand_error_report.tail == NULL);
3578 }
3579 
3580 /* Return TRUE if some operand error has been recorded during the
3581    parsing of the current assembly line using the opcode *OPCODE;
3582    otherwise return FALSE.  */
3583 static inline bfd_boolean
3584 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3585 {
3586   operand_error_record *record = operand_error_report.head;
3587   return record && record->opcode == opcode;
3588 }
3589 
3590 /* Add the error record *NEW_RECORD to operand_error_report.  The record's
3591    OPCODE field is initialized with OPCODE.
3592    N.B. only one record for each opcode, i.e. at most one error is
3593    recorded for each instruction template.  */
3594 
3595 static void
3596 add_operand_error_record (const operand_error_record* new_record)
3597 {
3598   const aarch64_opcode *opcode = new_record->opcode;
3599   operand_error_record* record = operand_error_report.head;
3600 
3601   /* The record may have been created for this opcode.  If not, we need
3602      to prepare one.  */
3603   if (! opcode_has_operand_error_p (opcode))
3604     {
3605       /* Get one empty record.  */
3606       if (free_opnd_error_record_nodes == NULL)
3607 	{
3608 	  record = xmalloc (sizeof (operand_error_record));
3609 	  if (record == NULL)
3610 	    abort ();
3611 	}
3612       else
3613 	{
3614 	  record = free_opnd_error_record_nodes;
3615 	  free_opnd_error_record_nodes = record->next;
3616 	}
3617       record->opcode = opcode;
3618       /* Insert at the head.  */
3619       record->next = operand_error_report.head;
3620       operand_error_report.head = record;
3621       if (operand_error_report.tail == NULL)
3622 	operand_error_report.tail = record;
3623     }
3624   else if (record->detail.kind != AARCH64_OPDE_NIL
3625 	   && record->detail.index <= new_record->detail.index
3626 	   && operand_error_higher_severity_p (record->detail.kind,
3627 					       new_record->detail.kind))
3628     {
3629       /* In the case of multiple errors found on operands related to a
3630 	 single opcode, only record the error of the leftmost operand and
3631 	 only if the error is of higher severity.  */
3632       DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3633 		   " the existing error %s on operand %d",
3634 		   operand_mismatch_kind_names[new_record->detail.kind],
3635 		   new_record->detail.index,
3636 		   operand_mismatch_kind_names[record->detail.kind],
3637 		   record->detail.index);
3638       return;
3639     }
3640 
3641   record->detail = new_record->detail;
3642 }
3643 
3644 static inline void
3645 record_operand_error_info (const aarch64_opcode *opcode,
3646 			   aarch64_operand_error *error_info)
3647 {
3648   operand_error_record record;
3649   record.opcode = opcode;
3650   record.detail = *error_info;
3651   add_operand_error_record (&record);
3652 }
3653 
3654 /* Record an error of kind KIND and, if ERROR is not NULL, the detailed
3655    error message ERROR, for operand IDX (counting from 0).  */
3656 
3657 static void
3658 record_operand_error (const aarch64_opcode *opcode, int idx,
3659 		      enum aarch64_operand_error_kind kind,
3660 		      const char* error)
3661 {
3662   aarch64_operand_error info;
3663   memset(&info, 0, sizeof (info));
3664   info.index = idx;
3665   info.kind = kind;
3666   info.error = error;
3667   record_operand_error_info (opcode, &info);
3668 }
3669 
3670 static void
3671 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3672 				enum aarch64_operand_error_kind kind,
3673 				const char* error, const int *extra_data)
3674 {
3675   aarch64_operand_error info;
3676   info.index = idx;
3677   info.kind = kind;
3678   info.error = error;
3679   info.data[0] = extra_data[0];
3680   info.data[1] = extra_data[1];
3681   info.data[2] = extra_data[2];
3682   record_operand_error_info (opcode, &info);
3683 }
3684 
3685 static void
3686 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3687 				   const char* error, int lower_bound,
3688 				   int upper_bound)
3689 {
3690   int data[3] = {lower_bound, upper_bound, 0};
3691   record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3692 				  error, data);
3693 }
3694 
3695 /* Remove the operand error record for *OPCODE.  */
3696 static void ATTRIBUTE_UNUSED
3697 remove_operand_error_record (const aarch64_opcode *opcode)
3698 {
3699   if (opcode_has_operand_error_p (opcode))
3700     {
3701       operand_error_record* record = operand_error_report.head;
3702       gas_assert (record != NULL && operand_error_report.tail != NULL);
3703       operand_error_report.head = record->next;
3704       record->next = free_opnd_error_record_nodes;
3705       free_opnd_error_record_nodes = record;
3706       if (operand_error_report.head == NULL)
3707 	{
3708 	  gas_assert (operand_error_report.tail == record);
3709 	  operand_error_report.tail = NULL;
3710 	}
3711     }
3712 }
3713 
3714 /* Given the instruction in *INSTR, return the index of the best matched
3715    qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3716 
3717    Return -1 if there is no qualifier sequence; return the first match
3718    if multiple matches are found.  */
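/* Illustrative sketch (hypothetical sequences): if the first sequence in
   QUALIFIERS_LIST were (X, X, X) and the second (W, W, W), an instruction
   parsed with W qualifiers on all three operands would best match the second
   sequence, so index 1 would be returned.  */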
3719 
3720 static int
3721 find_best_match (const aarch64_inst *instr,
3722 		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3723 {
3724   int i, num_opnds, max_num_matched, idx;
3725 
3726   num_opnds = aarch64_num_of_operands (instr->opcode);
3727   if (num_opnds == 0)
3728     {
3729       DEBUG_TRACE ("no operand");
3730       return -1;
3731     }
3732 
3733   max_num_matched = 0;
3734   idx = -1;
3735 
3736   /* For each pattern.  */
3737   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3738     {
3739       int j, num_matched;
3740       const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3741 
3742       /* Most opcodes have much fewer patterns in the list.  */
3743       if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3744 	{
3745 	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3746 	  if (i != 0 && idx == -1)
3747 	    /* If nothing has been matched, return the 1st sequence.  */
3748 	    idx = 0;
3749 	  break;
3750 	}
3751 
3752       for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3753 	if (*qualifiers == instr->operands[j].qualifier)
3754 	  ++num_matched;
3755 
3756       if (num_matched > max_num_matched)
3757 	{
3758 	  max_num_matched = num_matched;
3759 	  idx = i;
3760 	}
3761     }
3762 
3763   DEBUG_TRACE ("return with %d", idx);
3764   return idx;
3765 }
3766 
3767 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3768    corresponding operands in *INSTR.  */
3769 
3770 static inline void
3771 assign_qualifier_sequence (aarch64_inst *instr,
3772 			   const aarch64_opnd_qualifier_t *qualifiers)
3773 {
3774   int i = 0;
3775   int num_opnds = aarch64_num_of_operands (instr->opcode);
3776   gas_assert (num_opnds);
3777   for (i = 0; i < num_opnds; ++i, ++qualifiers)
3778     instr->operands[i].qualifier = *qualifiers;
3779 }
3780 
3781 /* Print operands for diagnostic purposes.  */
3782 
3783 static void
3784 print_operands (char *buf, const aarch64_opcode *opcode,
3785 		const aarch64_opnd_info *opnds)
3786 {
3787   int i;
3788 
3789   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3790     {
3791       const size_t size = 128;
3792       char str[size];
3793 
3794       /* We rely primarily on the opcode operand info; however, we also look
3795 	 into the inst->operands to support the printing of an optional
3796 	 operand.
3797 	 The two operand codes should be the same in all cases, apart from
3798 	 when the operand can be optional.  */
3799       if (opcode->operands[i] == AARCH64_OPND_NIL
3800 	  || opnds[i].type == AARCH64_OPND_NIL)
3801 	break;
3802 
3803       /* Generate the operand string in STR.  */
3804       aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3805 
3806       /* Delimiter.  */
3807       if (str[0] != '\0')
3808 	strcat (buf, i == 0 ? " " : ",");
3809 
3810       /* Append the operand string.  */
3811       strcat (buf, str);
3812     }
3813 }
3814 
3815 /* Send an informational string to stderr.  */
3816 
3817 static void
3818 output_info (const char *format, ...)
3819 {
3820   char *file;
3821   unsigned int line;
3822   va_list args;
3823 
3824   as_where (&file, &line);
3825   if (file)
3826     {
3827       if (line != 0)
3828 	fprintf (stderr, "%s:%u: ", file, line);
3829       else
3830 	fprintf (stderr, "%s: ", file);
3831     }
3832   fprintf (stderr, _("Info: "));
3833   va_start (args, format);
3834   vfprintf (stderr, format, args);
3835   va_end (args);
3836   (void) putc ('\n', stderr);
3837 }
3838 
3839 /* Output one operand error record.  */
3840 
3841 static void
3842 output_operand_error_record (const operand_error_record *record, char *str)
3843 {
3844   int idx = record->detail.index;
3845   const aarch64_opcode *opcode = record->opcode;
3846   enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3847 				: AARCH64_OPND_NIL);
3848   const aarch64_operand_error *detail = &record->detail;
3849 
3850   switch (detail->kind)
3851     {
3852     case AARCH64_OPDE_NIL:
3853       gas_assert (0);
3854       break;
3855 
3856     case AARCH64_OPDE_SYNTAX_ERROR:
3857     case AARCH64_OPDE_RECOVERABLE:
3858     case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3859     case AARCH64_OPDE_OTHER_ERROR:
3860       gas_assert (idx >= 0);
3861       /* Use the prepared error message if there is one; otherwise use the
3862 	 operand description string to describe the error.  */
3863       if (detail->error != NULL)
3864 	{
3865 	  if (detail->index == -1)
3866 	    as_bad (_("%s -- `%s'"), detail->error, str);
3867 	  else
3868 	    as_bad (_("%s at operand %d -- `%s'"),
3869 		    detail->error, detail->index + 1, str);
3870 	}
3871       else
3872 	as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3873 		aarch64_get_operand_desc (opd_code), str);
3874       break;
3875 
3876     case AARCH64_OPDE_INVALID_VARIANT:
3877       as_bad (_("operand mismatch -- `%s'"), str);
3878       if (verbose_error_p)
3879 	{
3880 	  /* We will try to correct the erroneous instruction and also provide
3881 	     more information e.g. all other valid variants.
3882 
3883 	     The string representation of the corrected instruction and other
3884 	     valid variants are generated by
3885 
3886 	     1) obtaining the intermediate representation of the erroneous
3887 	     instruction;
3888 	     2) manipulating the IR, e.g. replacing the operand qualifier;
3889 	     3) printing out the instruction by calling the printer functions
3890 	     shared with the disassembler.
3891 
3892 	     The limitation of this method is that the exact input assembly
3893 	     line cannot be accurately reproduced in some cases, for example an
3894 	     optional operand present in the actual assembly line will be
3895 	     omitted in the output; likewise for the optional syntax rules,
3896 	     e.g. the # before the immediate.  Another limitation is that the
3897 	     assembly symbols and relocation operations in the assembly line
3898 	     currently cannot be printed out in the error report.  Last but not
3899 	     least, when other errors co-exist with this error, the 'corrected'
3900 	     instruction may still be incorrect, e.g.  given
3901 	       'ldnp h0,h1,[x0,#6]!'
3902 	     this diagnosis will provide the version:
3903 	       'ldnp s0,s1,[x0,#6]!'
3904 	     which is still not right.  */
3905 	  size_t len = strlen (get_mnemonic_name (str));
3906 	  int i, qlf_idx;
3907 	  bfd_boolean result;
3908 	  const size_t size = 2048;
3909 	  char buf[size];
3910 	  aarch64_inst *inst_base = &inst.base;
3911 	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3912 
3913 	  /* Init inst.  */
3914 	  reset_aarch64_instruction (&inst);
3915 	  inst_base->opcode = opcode;
3916 
3917 	  /* Reset the error report so that there is no side effect on the
3918 	     following operand parsing.  */
3919 	  init_operand_error_report ();
3920 
3921 	  /* Fill inst.  */
3922 	  result = parse_operands (str + len, opcode)
3923 	    && programmer_friendly_fixup (&inst);
3924 	  gas_assert (result);
3925 	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3926 					  NULL, NULL);
3927 	  gas_assert (!result);
3928 
3929 	  /* Find the best-matching qualifier sequence.  */
3930 	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3931 	  gas_assert (qlf_idx > -1);
3932 
3933 	  /* Assign the qualifiers.  */
3934 	  assign_qualifier_sequence (inst_base,
3935 				     opcode->qualifiers_list[qlf_idx]);
3936 
3937 	  /* Print the hint.  */
3938 	  output_info (_("   did you mean this?"));
3939 	  snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3940 	  print_operands (buf, opcode, inst_base->operands);
3941 	  output_info (_("   %s"), buf);
3942 
3943 	  /* Print out other variant(s) if there is any.  */
3944 	  if (qlf_idx != 0
3945 	      || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
3946 	    output_info (_("   other valid variant(s):"));
3947 
3948 	  /* For each pattern.  */
3949 	  qualifiers_list = opcode->qualifiers_list;
3950 	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3951 	    {
3952 	      /* Most opcodes have much fewer patterns in the list.
3953 		 The first NIL qualifier indicates the end of the list.  */
3954 	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
3955 		break;
3956 
3957 	      if (i != qlf_idx)
3958 		{
3959 		  /* Mnemonics name.  */
3960 		  snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3961 
3962 		  /* Assign the qualifiers.  */
3963 		  assign_qualifier_sequence (inst_base, *qualifiers_list);
3964 
3965 		  /* Print instruction.  */
3966 		  print_operands (buf, opcode, inst_base->operands);
3967 
3968 		  output_info (_("   %s"), buf);
3969 		}
3970 	    }
3971 	}
3972       break;
3973 
3974     case AARCH64_OPDE_OUT_OF_RANGE:
3975       as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
3976 	      detail->error ? detail->error : _("immediate value"),
3977 	      detail->data[0], detail->data[1], detail->index + 1, str);
3978       break;
3979 
3980     case AARCH64_OPDE_REG_LIST:
3981       if (detail->data[0] == 1)
3982 	as_bad (_("invalid number of registers in the list; "
3983 		  "only 1 register is expected at operand %d -- `%s'"),
3984 		detail->index + 1, str);
3985       else
3986 	as_bad (_("invalid number of registers in the list; "
3987 		  "%d registers are expected at operand %d -- `%s'"),
3988 		detail->data[0], detail->index + 1, str);
3989       break;
3990 
3991     case AARCH64_OPDE_UNALIGNED:
3992       as_bad (_("immediate value should be a multiple of "
3993 		"%d at operand %d -- `%s'"),
3994 	      detail->data[0], detail->index + 1, str);
3995       break;
3996 
3997     default:
3998       gas_assert (0);
3999       break;
4000     }
4001 }
4002 
4003 /* Process and output the error message about the operand mismatch.
4004 
4005    When this function is called, the operand error information has
4006    been collected for an assembly line and there will be multiple
4007    errors in the case of multiple instruction templates; output the
4008    error message that most closely describes the problem.  */
4009 
4010 static void
4011 output_operand_error_report (char *str)
4012 {
4013   int largest_error_pos;
4014   const char *msg = NULL;
4015   enum aarch64_operand_error_kind kind;
4016   operand_error_record *curr;
4017   operand_error_record *head = operand_error_report.head;
4018   operand_error_record *record = NULL;
4019 
4020   /* No error to report.  */
4021   if (head == NULL)
4022     return;
4023 
4024   gas_assert (head != NULL && operand_error_report.tail != NULL);
4025 
4026   /* Only one error.  */
4027   if (head == operand_error_report.tail)
4028     {
4029       DEBUG_TRACE ("single opcode entry with error kind: %s",
4030 		   operand_mismatch_kind_names[head->detail.kind]);
4031       output_operand_error_record (head, str);
4032       return;
4033     }
4034 
4035   /* Find the error kind of the highest severity.  */
4036   DEBUG_TRACE ("multiple opcode entries with error kind");
4037   kind = AARCH64_OPDE_NIL;
4038   for (curr = head; curr != NULL; curr = curr->next)
4039     {
4040       gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4041       DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4042       if (operand_error_higher_severity_p (curr->detail.kind, kind))
4043 	kind = curr->detail.kind;
4044     }
4045   gas_assert (kind != AARCH64_OPDE_NIL);
4046 
4047   /* Pick one of the errors of KIND to report.  */
4048   largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
4049   for (curr = head; curr != NULL; curr = curr->next)
4050     {
4051       if (curr->detail.kind != kind)
4052 	continue;
4053       /* If there are multiple errors, pick the one with the highest
4054 	 mismatching operand index.  In the case of multiple errors with
4055 	 the equally highest operand index, pick the first one, or the
4056 	 first one with a non-NULL error message.  */
4057       if (curr->detail.index > largest_error_pos
4058 	  || (curr->detail.index == largest_error_pos && msg == NULL
4059 	      && curr->detail.error != NULL))
4060 	{
4061 	  largest_error_pos = curr->detail.index;
4062 	  record = curr;
4063 	  msg = record->detail.error;
4064 	}
4065     }
4066 
4067   gas_assert (largest_error_pos != -2 && record != NULL);
4068   DEBUG_TRACE ("Pick up error kind %s to report",
4069 	       operand_mismatch_kind_names[record->detail.kind]);
4070 
4071   /* Output.  */
4072   output_operand_error_record (record, str);
4073 }
4074 
4075 /* Write an AArch64 instruction to BUF - always little-endian.  */
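/* For example, put_aarch64_insn (buf, 0xd503201f) - the NOP encoding - stores
   the bytes 0x1f, 0x20, 0x03, 0xd5 in that order.  */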
4076 static void
4077 put_aarch64_insn (char *buf, uint32_t insn)
4078 {
4079   unsigned char *where = (unsigned char *) buf;
4080   where[0] = insn;
4081   where[1] = insn >> 8;
4082   where[2] = insn >> 16;
4083   where[3] = insn >> 24;
4084 }
4085 
4086 static uint32_t
4087 get_aarch64_insn (char *buf)
4088 {
4089   unsigned char *where = (unsigned char *) buf;
4090   uint32_t result;
4091   result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4092   return result;
4093 }
4094 
4095 static void
4096 output_inst (struct aarch64_inst *new_inst)
4097 {
4098   char *to = NULL;
4099 
4100   to = frag_more (INSN_SIZE);
4101 
4102   frag_now->tc_frag_data.recorded = 1;
4103 
4104   put_aarch64_insn (to, inst.base.value);
4105 
4106   if (inst.reloc.type != BFD_RELOC_UNUSED)
4107     {
4108       fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4109 				    INSN_SIZE, &inst.reloc.exp,
4110 				    inst.reloc.pc_rel,
4111 				    inst.reloc.type);
4112       DEBUG_TRACE ("Prepared relocation fix up");
4113       /* Don't check the addend value against the instruction size,
4114          that's the job of our code in md_apply_fix(). */
4115       fixp->fx_no_overflow = 1;
4116       if (new_inst != NULL)
4117 	fixp->tc_fix_data.inst = new_inst;
4118       if (aarch64_gas_internal_fixup_p ())
4119 	{
4120 	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4121 	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
4122 	  fixp->fx_addnumber = inst.reloc.flags;
4123 	}
4124     }
4125 
4126   dwarf2_emit_insn (INSN_SIZE);
4127 }
4128 
4129 /* Link together opcodes of the same name.  */
4130 
4131 struct templates
4132 {
4133   aarch64_opcode *opcode;
4134   struct templates *next;
4135 };
4136 
4137 typedef struct templates templates;
4138 
4139 static templates *
4140 lookup_mnemonic (const char *start, int len)
4141 {
4142   templates *templ = NULL;
4143 
4144   templ = hash_find_n (aarch64_ops_hsh, start, len);
4145   return templ;
4146 }
4147 
4148 /* Subroutine of md_assemble, responsible for looking up the primary
4149    opcode from the mnemonic the user wrote.  STR points to the
4150    beginning of the mnemonic. */
4151 
4152 static templates *
4153 opcode_lookup (char **str)
4154 {
4155   char *end, *base;
4156   const aarch64_cond *cond;
4157   char condname[16];
4158   int len;
4159 
4160   /* Scan up to the end of the mnemonic, which must end in white space,
4161      '.', or end of string.  */
4162   for (base = end = *str; is_part_of_name(*end); end++)
4163     if (*end == '.')
4164       break;
4165 
4166   if (end == base)
4167     return 0;
4168 
4169   inst.cond = COND_ALWAYS;
4170 
4171   /* Handle a possible condition.  */
4172   if (end[0] == '.')
4173     {
4174       cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4175       if (cond)
4176 	{
4177 	  inst.cond = cond->value;
4178 	  *str = end + 3;
4179 	}
4180       else
4181 	{
4182 	  *str = end;
4183 	  return 0;
4184 	}
4185     }
4186   else
4187     *str = end;
4188 
4189   len = end - base;
4190 
4191   if (inst.cond == COND_ALWAYS)
4192     {
4193       /* Look for unaffixed mnemonic.  */
4194       return lookup_mnemonic (base, len);
4195     }
4196   else if (len <= 13)
4197     {
4198       /* Append ".c" to the mnemonic if conditional.  */
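      /* For example, "b.eq" reaches here with inst.cond already set to the EQ
	 value found above; the lookup key becomes "b.c", under which the
	 opcode table is assumed to register the conditional branch
	 template.  */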
4199       memcpy (condname, base, len);
4200       memcpy (condname + len, ".c", 2);
4201       base = condname;
4202       len += 2;
4203       return lookup_mnemonic (base, len);
4204     }
4205 
4206   return NULL;
4207 }
4208 
4209 /* Internal helper routine converting a vector neon_type_el structure
4210    *VECTYPE to a corresponding operand qualifier.  */
4211 
4212 static inline aarch64_opnd_qualifier_t
4213 vectype_to_qualifier (const struct neon_type_el *vectype)
4214 {
4215   /* Element size in bytes indexed by neon_el_type.  */
4216   const unsigned char ele_size[5]
4217     = {1, 2, 4, 8, 16};
4218 
4219   if (!vectype->defined || vectype->type == NT_invtype)
4220     goto vectype_conversion_fail;
4221 
4222   gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4223 
4224   if (vectype->defined & NTA_HASINDEX)
4225     /* Vector element register.  */
4226     return AARCH64_OPND_QLF_S_B + vectype->type;
4227   else
4228     {
4229       /* Vector register.  */
4230       int reg_size = ele_size[vectype->type] * vectype->width;
4231       unsigned offset;
4232       if (reg_size != 16 && reg_size != 8)
4233 	goto vectype_conversion_fail;
4234       /* The conversion is calculated based on the relation of the order of
4235 	 qualifiers to the vector element size and vector register size.  */
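      /* Worked example (assuming the qualifier enum keeps the 8B, 16B, 4H,
	 8H, 2S, 4S, 1D, 2D, 1Q ordering): a ".4S" arrangement is NT_s (2)
	 with width 4, so reg_size is 16 and offset is
	 (2 << 1) + (16 >> 4) == 5, i.e. the 4S qualifier.  */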
4236       offset = (vectype->type == NT_q)
4237 	? 8 : (vectype->type << 1) + (reg_size >> 4);
4238       gas_assert (offset <= 8);
4239       return AARCH64_OPND_QLF_V_8B + offset;
4240     }
4241 
4242 vectype_conversion_fail:
4243   first_error (_("bad vector arrangement type"));
4244   return AARCH64_OPND_QLF_NIL;
4245 }
4246 
4247 /* Process an optional operand that has been omitted from the assembly line.
4248    Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
4249    instruction's opcode entry while IDX is the index of this omitted operand.
4250    */
4251 
4252 static void
4253 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4254 			 int idx, aarch64_opnd_info *operand)
4255 {
4256   aarch64_insn default_value = get_optional_operand_default_value (opcode);
4257   gas_assert (optional_operand_p (opcode, idx));
4258   gas_assert (!operand->present);
4259 
4260   switch (type)
4261     {
4262     case AARCH64_OPND_Rd:
4263     case AARCH64_OPND_Rn:
4264     case AARCH64_OPND_Rm:
4265     case AARCH64_OPND_Rt:
4266     case AARCH64_OPND_Rt2:
4267     case AARCH64_OPND_Rs:
4268     case AARCH64_OPND_Ra:
4269     case AARCH64_OPND_Rt_SYS:
4270     case AARCH64_OPND_Rd_SP:
4271     case AARCH64_OPND_Rn_SP:
4272     case AARCH64_OPND_Fd:
4273     case AARCH64_OPND_Fn:
4274     case AARCH64_OPND_Fm:
4275     case AARCH64_OPND_Fa:
4276     case AARCH64_OPND_Ft:
4277     case AARCH64_OPND_Ft2:
4278     case AARCH64_OPND_Sd:
4279     case AARCH64_OPND_Sn:
4280     case AARCH64_OPND_Sm:
4281     case AARCH64_OPND_Vd:
4282     case AARCH64_OPND_Vn:
4283     case AARCH64_OPND_Vm:
4284     case AARCH64_OPND_VdD1:
4285     case AARCH64_OPND_VnD1:
4286       operand->reg.regno = default_value;
4287       break;
4288 
4289     case AARCH64_OPND_Ed:
4290     case AARCH64_OPND_En:
4291     case AARCH64_OPND_Em:
4292       operand->reglane.regno = default_value;
4293       break;
4294 
4295     case AARCH64_OPND_IDX:
4296     case AARCH64_OPND_BIT_NUM:
4297     case AARCH64_OPND_IMMR:
4298     case AARCH64_OPND_IMMS:
4299     case AARCH64_OPND_SHLL_IMM:
4300     case AARCH64_OPND_IMM_VLSL:
4301     case AARCH64_OPND_IMM_VLSR:
4302     case AARCH64_OPND_CCMP_IMM:
4303     case AARCH64_OPND_FBITS:
4304     case AARCH64_OPND_UIMM4:
4305     case AARCH64_OPND_UIMM3_OP1:
4306     case AARCH64_OPND_UIMM3_OP2:
4307     case AARCH64_OPND_IMM:
4308     case AARCH64_OPND_WIDTH:
4309     case AARCH64_OPND_UIMM7:
4310     case AARCH64_OPND_NZCV:
4311       operand->imm.value = default_value;
4312       break;
4313 
4314     case AARCH64_OPND_EXCEPTION:
4315       inst.reloc.type = BFD_RELOC_UNUSED;
4316       break;
4317 
4318     case AARCH64_OPND_BARRIER_ISB:
4319       operand->barrier = aarch64_barrier_options + default_value;
4320 
4321     default:
4322       break;
4323     }
4324 }
4325 
4326 /* Process the relocation type for move wide instructions.
4327    Return TRUE on success; otherwise return FALSE.  */
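/* For instance, an instruction written as "movz x0, #:abs_g1:sym" is assumed
   to arrive here with inst.reloc.type set to BFD_RELOC_AARCH64_MOVW_G1, so
   the implicit shift amount selected below is 16.  */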
4328 
4329 static bfd_boolean
4330 process_movw_reloc_info (void)
4331 {
4332   int is32;
4333   unsigned shift;
4334 
4335   is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4336 
4337   if (inst.base.opcode->op == OP_MOVK)
4338     switch (inst.reloc.type)
4339       {
4340       case BFD_RELOC_AARCH64_MOVW_G0_S:
4341       case BFD_RELOC_AARCH64_MOVW_G1_S:
4342       case BFD_RELOC_AARCH64_MOVW_G2_S:
4343       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4344       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4345       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4346       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4347       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4348 	set_syntax_error
4349 	  (_("the specified relocation type is not allowed for MOVK"));
4350 	return FALSE;
4351       default:
4352 	break;
4353       }
4354 
4355   switch (inst.reloc.type)
4356     {
4357     case BFD_RELOC_AARCH64_MOVW_G0:
4358     case BFD_RELOC_AARCH64_MOVW_G0_S:
4359     case BFD_RELOC_AARCH64_MOVW_G0_NC:
4360     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4361     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4362       shift = 0;
4363       break;
4364     case BFD_RELOC_AARCH64_MOVW_G1:
4365     case BFD_RELOC_AARCH64_MOVW_G1_S:
4366     case BFD_RELOC_AARCH64_MOVW_G1_NC:
4367     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4368     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4369       shift = 16;
4370       break;
4371     case BFD_RELOC_AARCH64_MOVW_G2:
4372     case BFD_RELOC_AARCH64_MOVW_G2_S:
4373     case BFD_RELOC_AARCH64_MOVW_G2_NC:
4374     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4375       if (is32)
4376 	{
4377 	  set_fatal_syntax_error
4378 	    (_("the specified relocation type is not allowed for 32-bit "
4379 	       "register"));
4380 	  return FALSE;
4381 	}
4382       shift = 32;
4383       break;
4384     case BFD_RELOC_AARCH64_MOVW_G3:
4385       if (is32)
4386 	{
4387 	  set_fatal_syntax_error
4388 	    (_("the specified relocation type is not allowed for 32-bit "
4389 	       "register"));
4390 	  return FALSE;
4391 	}
4392       shift = 48;
4393       break;
4394     default:
4395       /* More cases should be added when more MOVW-related relocation types
4396          are supported in GAS.  */
4397       gas_assert (aarch64_gas_internal_fixup_p ());
4398       /* The shift amount should have already been set by the parser.  */
4399       return TRUE;
4400     }
4401   inst.base.operands[1].shifter.amount = shift;
4402   return TRUE;
4403 }
4404 
4405 /* A primitive base-2 log calculator.  */
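/* For example, get_logsz (8) returns 3; sizes up to 16 that are not powers
   of two trip the assertion below.  */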
4406 
4407 static inline unsigned int
4408 get_logsz (unsigned int size)
4409 {
4410   const unsigned char ls[16] =
4411     {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4412   if (size > 16)
4413     {
4414       gas_assert (0);
4415       return -1;
4416     }
4417   gas_assert (ls[size - 1] != (unsigned char)-1);
4418   return ls[size - 1];
4419 }
4420 
4421 /* Determine and return the real reloc type code for an instruction
4422    with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */
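/* For example, assuming "ldr x0, [x1, #:lo12:sym]" arrives here, the X
   qualifier has an element size of 8, so logsz is 3 and
   BFD_RELOC_AARCH64_LDST64_LO12 is returned.  */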
4423 
4424 static inline bfd_reloc_code_real_type
4425 ldst_lo12_determine_real_reloc_type (void)
4426 {
4427   int logsz;
4428   enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4429   enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4430 
4431   const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4432       BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4433       BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4434       BFD_RELOC_AARCH64_LDST128_LO12
4435   };
4436 
4437   gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4438   gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4439 
4440   if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4441     opd1_qlf =
4442       aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4443 				      1, opd0_qlf, 0);
4444   gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4445 
4446   logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4447   gas_assert (logsz >= 0 && logsz <= 4);
4448 
4449   return reloc_ldst_lo12[logsz];
4450 }
4451 
4452 /* Check whether a register list REGINFO is valid.  The registers must be
4453    numbered in increasing order (modulo 32), in increments of one or two.
4454 
4455    If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4456    increments of two.
4457 
4458    Return FALSE if such a register list is invalid, otherwise return TRUE.  */
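/* REGINFO is assumed to use the packing produced by parse_neon_reg_list:
   bits [1:0] hold the number of registers minus one, and each successive
   5-bit field starting at bit 2 holds a register number (the first register
   is in bits [6:2]).  */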
4459 
4460 static bfd_boolean
4461 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4462 {
4463   uint32_t i, nb_regs, prev_regno, incr;
4464 
4465   nb_regs = 1 + (reginfo & 0x3);
4466   reginfo >>= 2;
4467   prev_regno = reginfo & 0x1f;
4468   incr = accept_alternate ? 2 : 1;
4469 
4470   for (i = 1; i < nb_regs; ++i)
4471     {
4472       uint32_t curr_regno;
4473       reginfo >>= 5;
4474       curr_regno = reginfo & 0x1f;
4475       if (curr_regno != ((prev_regno + incr) & 0x1f))
4476 	return FALSE;
4477       prev_regno = curr_regno;
4478     }
4479 
4480   return TRUE;
4481 }
4482 
4483 /* Generic instruction operand parser.	This does no encoding and no
4484    semantic validation; it merely squirrels values away in the inst
4485    structure.  Returns TRUE or FALSE depending on whether the
4486    specified grammar matched.  */
4487 
4488 static bfd_boolean
4489 parse_operands (char *str, const aarch64_opcode *opcode)
4490 {
4491   int i;
4492   char *backtrack_pos = 0;
4493   const enum aarch64_opnd *operands = opcode->operands;
4494 
4495   clear_error ();
4496   skip_whitespace (str);
4497 
4498   for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4499     {
4500       int64_t val;
4501       int isreg32, isregzero;
4502       int comma_skipped_p = 0;
4503       aarch64_reg_type rtype;
4504       struct neon_type_el vectype;
4505       aarch64_opnd_info *info = &inst.base.operands[i];
4506 
4507       DEBUG_TRACE ("parse operand %d", i);
4508 
4509       /* Assign the operand code.  */
4510       info->type = operands[i];
4511 
4512       if (optional_operand_p (opcode, i))
4513 	{
4514 	  /* Remember where we are in case we need to backtrack.  */
4515 	  gas_assert (!backtrack_pos);
4516 	  backtrack_pos = str;
4517 	}
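      /* A sketch of the backtrack path: if parsing this optional operand
	 fails, the failure handler at the bottom of this loop fills in the
	 operand's default via process_omitted_operand and resumes parsing at
	 backtrack_pos; for instance, "ret" with the optional Rn omitted is
	 assumed to default to register 30 this way.  */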
4518 
4519       /* Expect a comma between operands; the backtrack mechanism will take
4520 	 care of the case of an omitted optional operand.  */
4521       if (i > 0 && ! skip_past_char (&str, ','))
4522 	{
4523 	  set_syntax_error (_("comma expected between operands"));
4524 	  goto failure;
4525 	}
4526       else
4527 	comma_skipped_p = 1;
4528 
4529       switch (operands[i])
4530 	{
4531 	case AARCH64_OPND_Rd:
4532 	case AARCH64_OPND_Rn:
4533 	case AARCH64_OPND_Rm:
4534 	case AARCH64_OPND_Rt:
4535 	case AARCH64_OPND_Rt2:
4536 	case AARCH64_OPND_Rs:
4537 	case AARCH64_OPND_Ra:
4538 	case AARCH64_OPND_Rt_SYS:
4539 	  po_int_reg_or_fail (1, 0);
4540 	  break;
4541 
4542 	case AARCH64_OPND_Rd_SP:
4543 	case AARCH64_OPND_Rn_SP:
4544 	  po_int_reg_or_fail (0, 1);
4545 	  break;
4546 
4547 	case AARCH64_OPND_Rm_EXT:
4548 	case AARCH64_OPND_Rm_SFT:
4549 	  po_misc_or_fail (parse_shifter_operand
4550 			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4551 					 ? SHIFTED_ARITH_IMM
4552 					 : SHIFTED_LOGIC_IMM)));
4553 	  if (!info->shifter.operator_present)
4554 	    {
4555 	      /* Default to LSL if not present.  Libopcodes prefers shifter
4556 		 kind to be explicit.  */
4557 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4558 	      info->shifter.kind = AARCH64_MOD_LSL;
4559 	      /* For Rm_EXT, libopcodes will carry out a further check on whether
4560 		 or not the stack pointer is used in the instruction (recall that
4561 		 "the extend operator is not optional unless at least one of
4562 		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
4563 	    }
4564 	  break;
4565 
4566 	case AARCH64_OPND_Fd:
4567 	case AARCH64_OPND_Fn:
4568 	case AARCH64_OPND_Fm:
4569 	case AARCH64_OPND_Fa:
4570 	case AARCH64_OPND_Ft:
4571 	case AARCH64_OPND_Ft2:
4572 	case AARCH64_OPND_Sd:
4573 	case AARCH64_OPND_Sn:
4574 	case AARCH64_OPND_Sm:
4575 	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4576 	  if (val == PARSE_FAIL)
4577 	    {
4578 	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4579 	      goto failure;
4580 	    }
4581 	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4582 
4583 	  info->reg.regno = val;
4584 	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4585 	  break;
4586 
4587 	case AARCH64_OPND_Vd:
4588 	case AARCH64_OPND_Vn:
4589 	case AARCH64_OPND_Vm:
4590 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4591 	  if (val == PARSE_FAIL)
4592 	    {
4593 	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4594 	      goto failure;
4595 	    }
4596 	  if (vectype.defined & NTA_HASINDEX)
4597 	    goto failure;
4598 
4599 	  info->reg.regno = val;
4600 	  info->qualifier = vectype_to_qualifier (&vectype);
4601 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
4602 	    goto failure;
4603 	  break;
4604 
4605 	case AARCH64_OPND_VdD1:
4606 	case AARCH64_OPND_VnD1:
4607 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4608 	  if (val == PARSE_FAIL)
4609 	    {
4610 	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4611 	      goto failure;
4612 	    }
4613 	  if (vectype.type != NT_d || vectype.index != 1)
4614 	    {
4615 	      set_fatal_syntax_error
4616 		(_("the top half of a 128-bit FP/SIMD register is expected"));
4617 	      goto failure;
4618 	    }
4619 	  info->reg.regno = val;
4620 	  /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4621 	     here; it is correct for the purpose of encoding/decoding since
4622 	     only the register number is explicitly encoded in the related
4623 	     instructions, although this appears a bit hacky.  */
4624 	  info->qualifier = AARCH64_OPND_QLF_S_D;
4625 	  break;
4626 
4627 	case AARCH64_OPND_Ed:
4628 	case AARCH64_OPND_En:
4629 	case AARCH64_OPND_Em:
4630 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4631 	  if (val == PARSE_FAIL)
4632 	    {
4633 	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4634 	      goto failure;
4635 	    }
4636 	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4637 	    goto failure;
4638 
4639 	  info->reglane.regno = val;
4640 	  info->reglane.index = vectype.index;
4641 	  info->qualifier = vectype_to_qualifier (&vectype);
4642 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
4643 	    goto failure;
4644 	  break;
4645 
4646 	case AARCH64_OPND_LVn:
4647 	case AARCH64_OPND_LVt:
4648 	case AARCH64_OPND_LVt_AL:
4649 	case AARCH64_OPND_LEt:
4650 	  if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4651 	    goto failure;
4652 	  if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4653 	    {
4654 	      set_fatal_syntax_error (_("invalid register list"));
4655 	      goto failure;
4656 	    }
4657 	  info->reglist.first_regno = (val >> 2) & 0x1f;
4658 	  info->reglist.num_regs = (val & 0x3) + 1;
4659 	  if (operands[i] == AARCH64_OPND_LEt)
4660 	    {
4661 	      if (!(vectype.defined & NTA_HASINDEX))
4662 		goto failure;
4663 	      info->reglist.has_index = 1;
4664 	      info->reglist.index = vectype.index;
4665 	    }
4666 	  else if (!(vectype.defined & NTA_HASTYPE))
4667 	    goto failure;
4668 	  info->qualifier = vectype_to_qualifier (&vectype);
4669 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
4670 	    goto failure;
4671 	  break;
4672 
4673 	case AARCH64_OPND_Cn:
4674 	case AARCH64_OPND_Cm:
4675 	  po_reg_or_fail (REG_TYPE_CN);
4676 	  if (val > 15)
4677 	    {
4678 	      set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4679 	      goto failure;
4680 	    }
4681 	  inst.base.operands[i].reg.regno = val;
4682 	  break;
4683 
4684 	case AARCH64_OPND_SHLL_IMM:
4685 	case AARCH64_OPND_IMM_VLSR:
4686 	  po_imm_or_fail (1, 64);
4687 	  info->imm.value = val;
4688 	  break;
4689 
4690 	case AARCH64_OPND_CCMP_IMM:
4691 	case AARCH64_OPND_FBITS:
4692 	case AARCH64_OPND_UIMM4:
4693 	case AARCH64_OPND_UIMM3_OP1:
4694 	case AARCH64_OPND_UIMM3_OP2:
4695 	case AARCH64_OPND_IMM_VLSL:
4696 	case AARCH64_OPND_IMM:
4697 	case AARCH64_OPND_WIDTH:
4698 	  po_imm_nc_or_fail ();
4699 	  info->imm.value = val;
4700 	  break;
4701 
4702 	case AARCH64_OPND_UIMM7:
4703 	  po_imm_or_fail (0, 127);
4704 	  info->imm.value = val;
4705 	  break;
4706 
4707 	case AARCH64_OPND_IDX:
4708 	case AARCH64_OPND_BIT_NUM:
4709 	case AARCH64_OPND_IMMR:
4710 	case AARCH64_OPND_IMMS:
4711 	  po_imm_or_fail (0, 63);
4712 	  info->imm.value = val;
4713 	  break;
4714 
4715 	case AARCH64_OPND_IMM0:
4716 	  po_imm_nc_or_fail ();
4717 	  if (val != 0)
4718 	    {
4719 	      set_fatal_syntax_error (_("immediate zero expected"));
4720 	      goto failure;
4721 	    }
4722 	  info->imm.value = 0;
4723 	  break;
4724 
4725 	case AARCH64_OPND_FPIMM0:
4726 	  {
4727 	    int qfloat;
4728 	    bfd_boolean res1 = FALSE, res2 = FALSE;
4729 	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4730 	       it is probably not worth the effort to support it.  */
4731 	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4732 		&& !(res2 = parse_constant_immediate (&str, &val)))
4733 	      goto failure;
4734 	    if ((res1 && qfloat == 0) || (res2 && val == 0))
4735 	      {
4736 		info->imm.value = 0;
4737 		info->imm.is_fp = 1;
4738 		break;
4739 	      }
4740 	    set_fatal_syntax_error (_("immediate zero expected"));
4741 	    goto failure;
4742 	  }
4743 
4744 	case AARCH64_OPND_IMM_MOV:
4745 	  {
4746 	    char *saved = str;
4747 	    if (reg_name_p (str, REG_TYPE_R_Z_SP))
4748 	      goto failure;
4749 	    str = saved;
4750 	    po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4751 						GE_OPT_PREFIX, 1));
4752 	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4753 	       later.  fix_mov_imm_insn will try to determine a machine
4754 	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
4755 	       message if the immediate cannot be moved by a single
4756 	       instruction.  */
4757 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4758 	    inst.base.operands[i].skip = 1;
4759 	  }
4760 	  break;
4761 
4762 	case AARCH64_OPND_SIMD_IMM:
4763 	case AARCH64_OPND_SIMD_IMM_SFT:
4764 	  if (! parse_big_immediate (&str, &val))
4765 	    goto failure;
4766 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4767 					      /* addr_off_p */ 0,
4768 					      /* need_libopcodes_p */ 1,
4769 					      /* skip_p */ 1);
4770 	  /* Parse shift.
4771 	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4772 	     shift, we don't check it here; we leave the checking to
4773 	     the libopcodes (operand_general_constraint_met_p).  By
4774 	     doing this, we achieve better diagnostics.  */
4775 	  if (skip_past_comma (&str)
4776 	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4777 	    goto failure;
4778 	  if (!info->shifter.operator_present
4779 	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4780 	    {
4781 	      /* Default to LSL if not present.  Libopcodes prefers shifter
4782 		 kind to be explicit.  */
4783 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4784 	      info->shifter.kind = AARCH64_MOD_LSL;
4785 	    }
4786 	  break;
4787 
4788 	case AARCH64_OPND_FPIMM:
4789 	case AARCH64_OPND_SIMD_FPIMM:
4790 	  {
4791 	    int qfloat;
4792 	    bfd_boolean dp_p
4793 	      = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4794 		 == 8);
4795 	    if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4796 	      goto failure;
4797 	    if (qfloat == 0)
4798 	      {
4799 		set_fatal_syntax_error (_("invalid floating-point constant"));
4800 		goto failure;
4801 	      }
4802 	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4803 	    inst.base.operands[i].imm.is_fp = 1;
4804 	  }
4805 	  break;
4806 
4807 	case AARCH64_OPND_LIMM:
4808 	  po_misc_or_fail (parse_shifter_operand (&str, info,
4809 						  SHIFTED_LOGIC_IMM));
4810 	  if (info->shifter.operator_present)
4811 	    {
4812 	      set_fatal_syntax_error
4813 		(_("shift not allowed for bitmask immediate"));
4814 	      goto failure;
4815 	    }
4816 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4817 					      /* addr_off_p */ 0,
4818 					      /* need_libopcodes_p */ 1,
4819 					      /* skip_p */ 1);
4820 	  break;
4821 
4822 	case AARCH64_OPND_AIMM:
4823 	  if (opcode->op == OP_ADD)
4824 	    /* ADD may have relocation types.  */
4825 	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4826 							  SHIFTED_ARITH_IMM));
4827 	  else
4828 	    po_misc_or_fail (parse_shifter_operand (&str, info,
4829 						    SHIFTED_ARITH_IMM));
4830 	  switch (inst.reloc.type)
4831 	    {
4832 	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4833 	      info->shifter.amount = 12;
4834 	      break;
4835 	    case BFD_RELOC_UNUSED:
4836 	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4837 	      if (info->shifter.kind != AARCH64_MOD_NONE)
4838 		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4839 	      inst.reloc.pc_rel = 0;
4840 	      break;
4841 	    default:
4842 	      break;
4843 	    }
4844 	  info->imm.value = 0;
4845 	  if (!info->shifter.operator_present)
4846 	    {
4847 	      /* Default to LSL if not present.  Libopcodes prefers shifter
4848 		 kind to be explicit.  */
4849 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4850 	      info->shifter.kind = AARCH64_MOD_LSL;
4851 	    }
4852 	  break;
4853 
4854 	case AARCH64_OPND_HALF:
4855 	    {
4856 	      /* #<imm16> or relocation.  */
4857 	      int internal_fixup_p;
4858 	      po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4859 	      if (internal_fixup_p)
4860 		aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4861 	      skip_whitespace (str);
4862 	      if (skip_past_comma (&str))
4863 		{
4864 		  /* {, LSL #<shift>}  */
4865 		  if (! aarch64_gas_internal_fixup_p ())
4866 		    {
4867 		      set_fatal_syntax_error (_("can't mix relocation modifier "
4868 						"with explicit shift"));
4869 		      goto failure;
4870 		    }
4871 		  po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4872 		}
4873 	      else
4874 		inst.base.operands[i].shifter.amount = 0;
4875 	      inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4876 	      inst.base.operands[i].imm.value = 0;
4877 	      if (! process_movw_reloc_info ())
4878 		goto failure;
4879 	    }
4880 	  break;
4881 
4882 	case AARCH64_OPND_EXCEPTION:
4883 	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4884 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4885 					      /* addr_off_p */ 0,
4886 					      /* need_libopcodes_p */ 0,
4887 					      /* skip_p */ 1);
4888 	  break;
4889 
4890 	case AARCH64_OPND_NZCV:
4891 	  {
4892 	    const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4893 	    if (nzcv != NULL)
4894 	      {
4895 		str += 4;
4896 		info->imm.value = nzcv->value;
4897 		break;
4898 	      }
4899 	    po_imm_or_fail (0, 15);
4900 	    info->imm.value = val;
4901 	  }
4902 	  break;
4903 
4904 	case AARCH64_OPND_COND:
4905 	  info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4906 	  str += 2;
4907 	  if (info->cond == NULL)
4908 	    {
4909 	      set_syntax_error (_("invalid condition"));
4910 	      goto failure;
4911 	    }
4912 	  break;
4913 
4914 	case AARCH64_OPND_ADDR_ADRP:
4915 	  po_misc_or_fail (parse_adrp (&str));
4916 	  /* Clear the value, as the operand needs to be relocated.  */
4917 	  info->imm.value = 0;
4918 	  break;
4919 
4920 	case AARCH64_OPND_ADDR_PCREL14:
4921 	case AARCH64_OPND_ADDR_PCREL19:
4922 	case AARCH64_OPND_ADDR_PCREL21:
4923 	case AARCH64_OPND_ADDR_PCREL26:
4924 	  po_misc_or_fail (parse_address_reloc (&str, info));
4925 	  if (!info->addr.pcrel)
4926 	    {
4927 	      set_syntax_error (_("invalid pc-relative address"));
4928 	      goto failure;
4929 	    }
4930 	  if (inst.gen_lit_pool
4931 	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4932 	    {
4933 	      /* Only permit "=value" in the literal load instructions.
4934 		 The literal will be generated by programmer_friendly_fixup.  */
4935 	      set_syntax_error (_("invalid use of \"=immediate\""));
4936 	      goto failure;
4937 	    }
4938 	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4939 	    {
4940 	      set_syntax_error (_("unrecognized relocation suffix"));
4941 	      goto failure;
4942 	    }
4943 	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
4944 	    {
4945 	      info->imm.value = inst.reloc.exp.X_add_number;
4946 	      inst.reloc.type = BFD_RELOC_UNUSED;
4947 	    }
4948 	  else
4949 	    {
4950 	      info->imm.value = 0;
4951 	      switch (opcode->iclass)
4952 		{
4953 		case compbranch:
4954 		case condbranch:
4955 		  /* e.g. CBZ or B.COND  */
4956 		  gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4957 		  inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
4958 		  break;
4959 		case testbranch:
4960 		  /* e.g. TBZ  */
4961 		  gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
4962 		  inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
4963 		  break;
4964 		case branch_imm:
4965 		  /* e.g. B or BL  */
4966 		  gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
4967 		  inst.reloc.type = (opcode->op == OP_BL)
4968 		    ? BFD_RELOC_AARCH64_CALL26 : BFD_RELOC_AARCH64_JUMP26;
4969 		  break;
4970 		case loadlit:
4971 		  gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4972 		  inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
4973 		  break;
4974 		case pcreladdr:
4975 		  gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
4976 		  inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
4977 		  break;
4978 		default:
4979 		  gas_assert (0);
4980 		  abort ();
4981 		}
4982 	      inst.reloc.pc_rel = 1;
4983 	    }
4984 	  break;
4985 
4986 	case AARCH64_OPND_ADDR_SIMPLE:
4987 	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4988 	  /* [<Xn|SP>{, #<simm>}]  */
4989 	  po_char_or_fail ('[');
4990 	  po_reg_or_fail (REG_TYPE_R64_SP);
4991 	  /* Accept optional ", #0".  */
4992 	  if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
4993 	      && skip_past_char (&str, ','))
4994 	    {
4995 	      skip_past_char (&str, '#');
4996 	      if (! skip_past_char (&str, '0'))
4997 		{
4998 		  set_fatal_syntax_error
4999 		    (_("the optional immediate offset can only be 0"));
5000 		  goto failure;
5001 		}
5002 	    }
5003 	  po_char_or_fail (']');
5004 	  info->addr.base_regno = val;
5005 	  break;
5006 
5007 	case AARCH64_OPND_ADDR_REGOFF:
5008 	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
5009 	  po_misc_or_fail (parse_address (&str, info, 0));
5010 	  if (info->addr.pcrel || !info->addr.offset.is_reg
5011 	      || !info->addr.preind || info->addr.postind
5012 	      || info->addr.writeback)
5013 	    {
5014 	      set_syntax_error (_("invalid addressing mode"));
5015 	      goto failure;
5016 	    }
5017 	  if (!info->shifter.operator_present)
5018 	    {
5019 	      /* Default to LSL if not present.  Libopcodes prefers shifter
5020 		 kind to be explicit.  */
5021 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5022 	      info->shifter.kind = AARCH64_MOD_LSL;
5023 	    }
5024 	  /* Qualifier to be deduced by libopcodes.  */
5025 	  break;
5026 
5027 	case AARCH64_OPND_ADDR_SIMM7:
5028 	  po_misc_or_fail (parse_address (&str, info, 0));
5029 	  if (info->addr.pcrel || info->addr.offset.is_reg
5030 	      || (!info->addr.preind && !info->addr.postind))
5031 	    {
5032 	      set_syntax_error (_("invalid addressing mode"));
5033 	      goto failure;
5034 	    }
5035 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5036 					      /* addr_off_p */ 1,
5037 					      /* need_libopcodes_p */ 1,
5038 					      /* skip_p */ 0);
5039 	  break;
5040 
5041 	case AARCH64_OPND_ADDR_SIMM9:
5042 	case AARCH64_OPND_ADDR_SIMM9_2:
5043 	  po_misc_or_fail (parse_address_reloc (&str, info));
5044 	  if (info->addr.pcrel || info->addr.offset.is_reg
5045 	      || (!info->addr.preind && !info->addr.postind)
5046 	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5047 		  && info->addr.writeback))
5048 	    {
5049 	      set_syntax_error (_("invalid addressing mode"));
5050 	      goto failure;
5051 	    }
5052 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
5053 	    {
5054 	      set_syntax_error (_("relocation not allowed"));
5055 	      goto failure;
5056 	    }
5057 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5058 					      /* addr_off_p */ 1,
5059 					      /* need_libopcodes_p */ 1,
5060 					      /* skip_p */ 0);
5061 	  break;
5062 
5063 	case AARCH64_OPND_ADDR_UIMM12:
5064 	  po_misc_or_fail (parse_address_reloc (&str, info));
5065 	  if (info->addr.pcrel || info->addr.offset.is_reg
5066 	      || !info->addr.preind || info->addr.writeback)
5067 	    {
5068 	      set_syntax_error (_("invalid addressing mode"));
5069 	      goto failure;
5070 	    }
5071 	  if (inst.reloc.type == BFD_RELOC_UNUSED)
5072 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5073 	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5074 	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5075 	  /* Leave qualifier to be determined by libopcodes.  */
5076 	  break;
5077 
5078 	case AARCH64_OPND_SIMD_ADDR_POST:
5079 	  /* [<Xn|SP>], <Xm|#<amount>>  */
5080 	  po_misc_or_fail (parse_address (&str, info, 1));
5081 	  if (!info->addr.postind || !info->addr.writeback)
5082 	    {
5083 	      set_syntax_error (_("invalid addressing mode"));
5084 	      goto failure;
5085 	    }
5086 	  if (!info->addr.offset.is_reg)
5087 	    {
5088 	      if (inst.reloc.exp.X_op == O_constant)
5089 		info->addr.offset.imm = inst.reloc.exp.X_add_number;
5090 	      else
5091 		{
5092 		  set_fatal_syntax_error
5093 		    (_("writeback value should be an immediate constant"));
5094 		  goto failure;
5095 		}
5096 	    }
5097 	  /* No qualifier.  */
5098 	  break;
5099 
5100 	case AARCH64_OPND_SYSREG:
5101 	  if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5102 	      == PARSE_FAIL)
5103 	    {
5104 	      set_syntax_error (_("unknown or missing system register name"));
5105 	      goto failure;
5106 	    }
5107 	  inst.base.operands[i].sysreg = val;
5108 	  break;
5109 
5110 	case AARCH64_OPND_PSTATEFIELD:
5111 	  if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5112 	      == PARSE_FAIL)
5113 	    {
5114 	      set_syntax_error (_("unknown or missing PSTATE field name"));
5115 	      goto failure;
5116 	    }
5117 	  inst.base.operands[i].pstatefield = val;
5118 	  break;
5119 
5120 	case AARCH64_OPND_SYSREG_IC:
5121 	  inst.base.operands[i].sysins_op =
5122 	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5123 	  goto sys_reg_ins;
5124 	case AARCH64_OPND_SYSREG_DC:
5125 	  inst.base.operands[i].sysins_op =
5126 	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5127 	  goto sys_reg_ins;
5128 	case AARCH64_OPND_SYSREG_AT:
5129 	  inst.base.operands[i].sysins_op =
5130 	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5131 	  goto sys_reg_ins;
5132 	case AARCH64_OPND_SYSREG_TLBI:
5133 	  inst.base.operands[i].sysins_op =
5134 	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5135 sys_reg_ins:
5136 	  if (inst.base.operands[i].sysins_op == NULL)
5137 	    {
5138 	      set_fatal_syntax_error ( _("unknown or missing operation name"));
5139 	      goto failure;
5140 	    }
5141 	  break;
5142 
5143 	case AARCH64_OPND_BARRIER:
5144 	case AARCH64_OPND_BARRIER_ISB:
5145 	  val = parse_barrier (&str);
5146 	  if (val != PARSE_FAIL
5147 	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5148 	    {
5149 	      /* ISB only accepts the option name 'sy'.  */
5150 	      set_syntax_error
5151 		(_("the specified option is not accepted in ISB"));
5152 	      /* Turn off backtrack as this optional operand is present.  */
5153 	      backtrack_pos = 0;
5154 	      goto failure;
5155 	    }
5156 	  /* This is an extension to accept a 0..15 immediate.  */
5157 	  if (val == PARSE_FAIL)
5158 	    po_imm_or_fail (0, 15);
5159 	  info->barrier = aarch64_barrier_options + val;
5160 	  break;
5161 
5162 	case AARCH64_OPND_PRFOP:
5163 	  val = parse_pldop (&str);
5164 	  /* This is an extension to accept a 0..31 immediate.  */
5165 	  if (val == PARSE_FAIL)
5166 	    po_imm_or_fail (0, 31);
5167 	  inst.base.operands[i].prfop = aarch64_prfops + val;
5168 	  break;
5169 
5170 	default:
5171 	  as_fatal (_("unhandled operand code %d"), operands[i]);
5172 	}
5173 
5174       /* If we get here, this operand was successfully parsed.  */
5175       inst.base.operands[i].present = 1;
5176       continue;
5177 
5178 failure:
5179       /* The parse routine should already have set the error, but in case
5180 	 not, set a default one here.  */
5181       if (! error_p ())
5182 	set_default_error ();
5183 
5184       if (! backtrack_pos)
5185 	goto parse_operands_return;
5186 
5187       /* Reaching here means we are dealing with an optional operand that is
5188 	 omitted from the assembly line.  */
5189       gas_assert (optional_operand_p (opcode, i));
5190       info->present = 0;
5191       process_omitted_operand (operands[i], opcode, i, info);
5192 
5193       /* Try again, skipping the optional operand at backtrack_pos.  */
5194       str = backtrack_pos;
5195       backtrack_pos = 0;
5196 
5197       /* If this is the last operand, and it is optional and omitted, then a
5198 	 comma parsed before it is unexpected.  */
5199       if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5200 	{
5201 	  set_fatal_syntax_error
5202 	    (_("unexpected comma before the omitted optional operand"));
5203 	  goto parse_operands_return;
5204 	}
5205 
5206       /* Clear any error record after the omitted optional operand has been
5207 	 successfully handled.  */
5208       clear_error ();
5209     }
5210 
5211   /* Check if we have parsed all the operands.  */
5212   if (*str != '\0' && ! error_p ())
5213     {
5214       /* Set I to the index of the last present operand; this is
5215 	 for the purpose of diagnostics.  */
5216       for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5217 	;
5218       set_fatal_syntax_error
5219 	(_("unexpected characters following instruction"));
5220     }
5221 
5222 parse_operands_return:
5223 
5224   if (error_p ())
5225     {
5226       DEBUG_TRACE ("parsing FAIL: %s - %s",
5227 		   operand_mismatch_kind_names[get_error_kind ()],
5228 		   get_error_message ());
5229       /* Record the operand error properly; this is useful when there
5230 	 are multiple instruction templates for a mnemonic name, so that
5231 	 later on, we can select the error that most closely describes
5232 	 the problem.  */
5233       record_operand_error (opcode, i, get_error_kind (),
5234 			    get_error_message ());
5235       return FALSE;
5236     }
5237   else
5238     {
5239       DEBUG_TRACE ("parsing SUCCESS");
5240       return TRUE;
5241     }
5242 }
5243 
5244 /* Perform some fix-ups to provide programmer-friendly features while
5245    keeping libopcodes happy, i.e. libopcodes only accepts
5246    the preferred architectural syntax.
5247    Return FALSE if there is any failure; otherwise return TRUE.  */
5248 
5249 static bfd_boolean
5250 programmer_friendly_fixup (aarch64_instruction *instr)
5251 {
5252   aarch64_inst *base = &instr->base;
5253   const aarch64_opcode *opcode = base->opcode;
5254   enum aarch64_op op = opcode->op;
5255   aarch64_opnd_info *operands = base->operands;
5256 
5257   DEBUG_TRACE ("enter");
5258 
5259   switch (opcode->iclass)
5260     {
5261     case testbranch:
5262       /* TBNZ Xn|Wn, #uimm6, label
5263 	 Test and Branch Not Zero: conditionally jumps to label if bit number
5264 	 uimm6 in register Xn is not zero.  The bit number implies the width of
5265 	 the register, which may be written and should be disassembled as Wn if
5266 	 uimm is less than 32.  */
5267       if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5268 	{
5269 	  if (operands[1].imm.value >= 32)
5270 	    {
5271 	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5272 						 0, 31);
5273 	      return FALSE;
5274 	    }
5275 	  operands[0].qualifier = AARCH64_OPND_QLF_X;
5276 	}
5277       break;
5278     case loadlit:
5279       /* LDR Wt, label | =value
5280 	 As a convenience assemblers will typically permit the notation
5281 	 "=value" in conjunction with the pc-relative literal load instructions
5282 	 to automatically place an immediate value or symbolic address in a
5283 	 nearby literal pool and generate a hidden label which references it.
5284 	 ISREG has been set to 0 in the case of =value.  */
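      /* For example, "ldr x0, =0x11223344556677" would, under this scheme,
	 place the 64-bit constant in a nearby literal pool via add_to_lit_pool
	 below and load it PC-relatively through the generated label.  */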
5285       if (instr->gen_lit_pool
5286 	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5287 	{
5288 	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5289 	  if (op == OP_LDRSW_LIT)
5290 	    size = 4;
5291 	  if (instr->reloc.exp.X_op != O_constant
5292 	      && instr->reloc.exp.X_op != O_big
5293 	      && instr->reloc.exp.X_op != O_symbol)
5294 	    {
5295 	      record_operand_error (opcode, 1,
5296 				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5297 				    _("constant expression expected"));
5298 	      return FALSE;
5299 	    }
5300 	  if (! add_to_lit_pool (&instr->reloc.exp, size))
5301 	    {
5302 	      record_operand_error (opcode, 1,
5303 				    AARCH64_OPDE_OTHER_ERROR,
5304 				    _("literal pool insertion failed"));
5305 	      return FALSE;
5306 	    }
5307 	}
5308       break;
5309     case asimdimm:
5310       /* Allow MOVI V0.16B, 97, LSL 0, although the preferred architectural
5311 	 syntax requires that the LSL shifter can only be used when the
5312 	 destination register has the shape of 4H, 8H, 2S or 4S.  */
5313       if (op == OP_V_MOVI_B && operands[1].shifter.kind == AARCH64_MOD_LSL
5314 	  && (operands[0].qualifier == AARCH64_OPND_QLF_V_8B
5315 	      || operands[0].qualifier == AARCH64_OPND_QLF_V_16B))
5316 	{
5317 	  if (operands[1].shifter.amount != 0)
5318 	    {
5319 	      record_operand_error (opcode, 1,
5320 				    AARCH64_OPDE_OTHER_ERROR,
5321 				    _("shift amount non-zero"));
5322 	      return FALSE;
5323 	    }
5324 	  operands[1].shifter.kind = AARCH64_MOD_NONE;
5325 	  operands[1].qualifier = AARCH64_OPND_QLF_NIL;
5326 	}
5327       break;
5328     case log_shift:
5329     case bitfield:
5330       /* UXT[BHW] Wd, Wn
5331 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5332 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5333 	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5334 	 A programmer-friendly assembler should accept a destination Xd in
5335 	 place of Wd, however that is not the preferred form for disassembly.
5336 	 */
5337       if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5338 	  && operands[1].qualifier == AARCH64_OPND_QLF_W
5339 	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
5340 	operands[0].qualifier = AARCH64_OPND_QLF_W;
5341       break;
5342 
5343     case addsub_ext:
5344 	{
5345 	  /* In the 64-bit form, the final register operand is written as Wm
5346 	     for all but the (possibly omitted) UXTX/LSL and SXTX
5347 	     operators.
5348 	     As a programmer-friendly assembler, we accept e.g.
5349 	     ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5350 	     ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
5351 	  int idx = aarch64_operand_index (opcode->operands,
5352 					   AARCH64_OPND_Rm_EXT);
5353 	  gas_assert (idx == 1 || idx == 2);
5354 	  if (operands[0].qualifier == AARCH64_OPND_QLF_X
5355 	      && operands[idx].qualifier == AARCH64_OPND_QLF_X
5356 	      && operands[idx].shifter.kind != AARCH64_MOD_LSL
5357 	      && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5358 	      && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5359 	    operands[idx].qualifier = AARCH64_OPND_QLF_W;
5360 	}
5361       break;
5362 
5363     default:
5364       break;
5365     }
5366 
5367   DEBUG_TRACE ("exit with SUCCESS");
5368   return TRUE;
5369 }
5370 
5371 /* A wrapper function to interface with libopcodes on encoding and
5372    record the error message if there is any.
5373 
5374    Return TRUE on success; otherwise return FALSE.  */
5375 
5376 static bfd_boolean
5377 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5378 	   aarch64_insn *code)
5379 {
5380   aarch64_operand_error error_info;
5381   error_info.kind = AARCH64_OPDE_NIL;
5382   if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5383     return TRUE;
5384   else
5385     {
5386       gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5387       record_operand_error_info (opcode, &error_info);
5388       return FALSE;
5389     }
5390 }
5391 
5392 #ifdef DEBUG_AARCH64
5393 static inline void
5394 dump_opcode_operands (const aarch64_opcode *opcode)
5395 {
5396   int i = 0;
5397   while (opcode->operands[i] != AARCH64_OPND_NIL)
5398     {
5399       aarch64_verbose ("\t\t opnd%d: %s", i,
5400 		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5401 		       ? aarch64_get_operand_name (opcode->operands[i])
5402 		       : aarch64_get_operand_desc (opcode->operands[i]));
5403       ++i;
5404     }
5405 }
5406 #endif /* DEBUG_AARCH64 */
5407 
5408 /* This is the guts of the machine-dependent assembler.  STR points to a
5409    machine dependent instruction.  This function is supposed to emit
5410    the frags/bytes it assembles to.  */
5411 
5412 void
5413 md_assemble (char *str)
5414 {
5415   char *p = str;
5416   templates *template;
5417   aarch64_opcode *opcode;
5418   aarch64_inst *inst_base;
5419   unsigned saved_cond;
5420 
5421   /* Align the previous label if needed.  */
5422   if (last_label_seen != NULL)
5423     {
5424       symbol_set_frag (last_label_seen, frag_now);
5425       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5426       S_SET_SEGMENT (last_label_seen, now_seg);
5427     }
5428 
5429   inst.reloc.type = BFD_RELOC_UNUSED;
5430 
5431   DEBUG_TRACE ("\n\n");
5432   DEBUG_TRACE ("==============================");
5433   DEBUG_TRACE ("Enter md_assemble with %s", str);
5434 
5435   template = opcode_lookup (&p);
5436   if (!template)
5437     {
5438       /* It wasn't an instruction, but it might be a register alias created
5439          by a directive of the form "alias .req reg".  */
5440       if (!create_register_alias (str, p))
5441 	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5442 		str);
5443       return;
5444     }
5445 
5446   skip_whitespace (p);
5447   if (*p == ',')
5448     {
5449       as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5450 	      get_mnemonic_name (str), str);
5451       return;
5452     }
5453 
5454   init_operand_error_report ();
5455 
5456   saved_cond = inst.cond;
5457   reset_aarch64_instruction (&inst);
5458   inst.cond = saved_cond;
5459 
5460   /* Iterate through all opcode entries with the same mnemonic name.  */
5461   do
5462     {
5463       opcode = template->opcode;
5464 
5465       DEBUG_TRACE ("opcode %s found", opcode->name);
5466 #ifdef DEBUG_AARCH64
5467       if (debug_dump)
5468 	dump_opcode_operands (opcode);
5469 #endif /* DEBUG_AARCH64 */
5470 
5471       /* Check that this instruction is supported for this CPU.  */
5472       if (!opcode->avariant
5473 	  || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5474 	{
5475 	  as_bad (_("selected processor does not support `%s'"), str);
5476 	  return;
5477 	}
5478 
5479       mapping_state (MAP_INSN);
5480 
5481       inst_base = &inst.base;
5482       inst_base->opcode = opcode;
5483 
5484       /* Truly conditionally executed instructions, e.g. b.cond.  */
5485       if (opcode->flags & F_COND)
5486 	{
5487 	  gas_assert (inst.cond != COND_ALWAYS);
5488 	  inst_base->cond = get_cond_from_value (inst.cond);
5489 	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5490 	}
5491       else if (inst.cond != COND_ALWAYS)
5492 	{
5493 	  /* We should not get here: the assembly looks like a
5494 	     conditional instruction but the opcode found is unconditional.  */
5495 	  gas_assert (0);
5496 	  continue;
5497 	}
5498 
5499       if (parse_operands (p, opcode)
5500 	  && programmer_friendly_fixup (&inst)
5501 	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5502 	{
5503 	  if (inst.reloc.type == BFD_RELOC_UNUSED
5504 	      || !inst.reloc.need_libopcodes_p)
5505 	    output_inst (NULL);
5506 	  else
5507 	    {
5508 	      /* If a relocation is generated for the instruction,
5509 	         store the instruction information for the future fix-up.  */
5510 	      struct aarch64_inst *copy;
5511 	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5512 	      if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5513 		abort ();
5514 	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5515 	      output_inst (copy);
5516 	    }
5517 	  return;
5518 	}
5519 
5520       template = template->next;
5521       if (template != NULL)
5522 	{
5523 	  reset_aarch64_instruction (&inst);
5524 	  inst.cond = saved_cond;
5525 	}
5526     }
5527   while (template != NULL);
5528 
5529   /* Issue the error messages if any.  */
5530   output_operand_error_report (str);
5531 }
5532 
5533 /* Various frobbings of labels and their addresses.  */
5534 
5535 void
5536 aarch64_start_line_hook (void)
5537 {
5538   last_label_seen = NULL;
5539 }
5540 
5541 void
5542 aarch64_frob_label (symbolS * sym)
5543 {
5544   last_label_seen = sym;
5545 
5546   dwarf2_emit_label (sym);
5547 }
5548 
5549 int
5550 aarch64_data_in_code (void)
5551 {
5552   if (!strncmp (input_line_pointer + 1, "data:", 5))
5553     {
5554       *input_line_pointer = '/';
5555       input_line_pointer += 5;
5556       *input_line_pointer = 0;
5557       return 1;
5558     }
5559 
5560   return 0;
5561 }
5562 
5563 char *
5564 aarch64_canonicalize_symbol_name (char *name)
5565 {
5566   int len;
5567 
5568   if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5569     *(name + len - 5) = 0;
5570 
5571   return name;
5572 }
5573 
5574 /* Table of all register names defined by default.  The user can
5575    define additional names with .req.  Note that all register names
5576    should appear in both upper and lowercase variants.	Some registers
5577    also have mixed-case names.	*/
5578 
5579 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5580 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5581 #define REGSET31(p,t) \
5582   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5583   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5584   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5585   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5586   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5587   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5588   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5589   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5590 #define REGSET(p,t) \
5591   REGSET31(p,t), REGNUM(p,31,t)
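/* For example, REGSET31 (x, R_64) defines entries x0 .. x30, while
   REGSET (v, VN) additionally defines v31.  */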
5592 
5593 /* These go into aarch64_reg_hsh hash-table.  */
5594 static const reg_entry reg_names[] = {
5595   /* Integer registers.  */
5596   REGSET31 (x, R_64), REGSET31 (X, R_64),
5597   REGSET31 (w, R_32), REGSET31 (W, R_32),
5598 
5599   REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5600   REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5601 
5602   REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5603   REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5604 
5605   /* Coprocessor register numbers.  */
5606   REGSET (c, CN), REGSET (C, CN),
5607 
5608   /* Floating-point single precision registers.  */
5609   REGSET (s, FP_S), REGSET (S, FP_S),
5610 
5611   /* Floating-point double precision registers.  */
5612   REGSET (d, FP_D), REGSET (D, FP_D),
5613 
5614   /* Floating-point half precision registers.  */
5615   REGSET (h, FP_H), REGSET (H, FP_H),
5616 
5617   /* Floating-point byte precision registers.  */
5618   REGSET (b, FP_B), REGSET (B, FP_B),
5619 
5620   /* Floating-point quad precision registers.  */
5621   REGSET (q, FP_Q), REGSET (Q, FP_Q),
5622 
5623   /* FP/SIMD registers.  */
5624   REGSET (v, VN), REGSET (V, VN),
5625 };
5626 
5627 #undef REGDEF
5628 #undef REGNUM
5629 #undef REGSET
5630 
5631 #define N 1
5632 #define n 0
5633 #define Z 1
5634 #define z 0
5635 #define C 1
5636 #define c 0
5637 #define V 1
5638 #define v 0
5639 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
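/* For example, B (n, Z, C, v) evaluates to 0b0110, the value stored for the
   "nZCv" entry below.  */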
5640 static const asm_nzcv nzcv_names[] = {
5641   {"nzcv", B (n, z, c, v)},
5642   {"nzcV", B (n, z, c, V)},
5643   {"nzCv", B (n, z, C, v)},
5644   {"nzCV", B (n, z, C, V)},
5645   {"nZcv", B (n, Z, c, v)},
5646   {"nZcV", B (n, Z, c, V)},
5647   {"nZCv", B (n, Z, C, v)},
5648   {"nZCV", B (n, Z, C, V)},
5649   {"Nzcv", B (N, z, c, v)},
5650   {"NzcV", B (N, z, c, V)},
5651   {"NzCv", B (N, z, C, v)},
5652   {"NzCV", B (N, z, C, V)},
5653   {"NZcv", B (N, Z, c, v)},
5654   {"NZcV", B (N, Z, c, V)},
5655   {"NZCv", B (N, Z, C, v)},
5656   {"NZCV", B (N, Z, C, V)}
5657 };
5658 
5659 #undef N
5660 #undef n
5661 #undef Z
5662 #undef z
5663 #undef C
5664 #undef c
5665 #undef V
5666 #undef v
5667 #undef B
5668 
5669 /* MD interface: bits in the object file.  */
5670 
5671 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5672    for use in the a.out file, and store them in the array pointed to by buf.
5673    This knows about the endian-ness of the target machine and does
5674    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
5675    2 (short) and 4 (long).  Floating-point numbers are put out as a series of
5676    LITTLENUMS (shorts, here at least).	*/
5677 
5678 void
5679 md_number_to_chars (char *buf, valueT val, int n)
5680 {
5681   if (target_big_endian)
5682     number_to_chars_bigendian (buf, val, n);
5683   else
5684     number_to_chars_littleendian (buf, val, n);
5685 }
5686 
5687 /* MD interface: Sections.  */
5688 
5689 /* Estimate the size of a frag before relaxing.  Assume everything fits in
5690    4 bytes.  */
5691 
5692 int
5693 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5694 {
5695   fragp->fr_var = 4;
5696   return 4;
5697 }
5698 
5699 /* Round up a section size to the appropriate boundary.	 */
5700 
5701 valueT
5702 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5703 {
5704   return size;
5705 }
5706 
5707 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
5708    of an rs_align_code fragment.  */
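/* For example (illustrative only): if 6 bytes of padding are required, the
   first 2 bytes are zero-filled (and, under ELF, marked with a data mapping
   symbol) so that the remaining 4 bytes can be filled with a single NOP.  */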
5709 
5710 void
5711 aarch64_handle_align (fragS * fragP)
5712 {
5713   /* NOP = d503201f */
5714   /* AArch64 instructions are always little-endian.  */
5715   static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5716 
5717   int bytes, fix, noop_size;
5718   char *p;
5719   const char *noop;
5720 
5721   if (fragP->fr_type != rs_align_code)
5722     return;
5723 
5724   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5725   p = fragP->fr_literal + fragP->fr_fix;
5726   fix = 0;
5727 
5728   if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5729     bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5730 
5731 #ifdef OBJ_ELF
5732   gas_assert (fragP->tc_frag_data.recorded);
5733 #endif
5734 
5735   noop = aarch64_noop;
5736   noop_size = sizeof (aarch64_noop);
5737   fragP->fr_var = noop_size;
5738 
5739   if (bytes & (noop_size - 1))
5740     {
5741       fix = bytes & (noop_size - 1);
5742 #ifdef OBJ_ELF
5743       insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5744 #endif
5745       memset (p, 0, fix);
5746       p += fix;
5747       bytes -= fix;
5748     }
5749 
5750   while (bytes >= noop_size)
5751     {
5752       memcpy (p, noop, noop_size);
5753       p += noop_size;
5754       bytes -= noop_size;
5755       fix += noop_size;
5756     }
5757 
5758   fragP->fr_fix += fix;
5759 }
5760 
5761 /* Called from md_do_align.  Used to create an alignment
5762    frag in a code section.  */
5763 
5764 void
5765 aarch64_frag_align_code (int n, int max)
5766 {
5767   char *p;
5768 
5769   /* We assume that there will never be a requirement
5770      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
5771   if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5772     as_fatal (_
5773 	      ("alignments greater than %d bytes not supported in .text sections"),
5774 	      MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5775 
5776   p = frag_var (rs_align_code,
5777 		MAX_MEM_FOR_RS_ALIGN_CODE,
5778 		1,
5779 		(relax_substateT) max,
5780 		(symbolS *) NULL, (offsetT) n, (char *) NULL);
5781   *p = 0;
5782 }
5783 
5784 /* Perform target specific initialisation of a frag.
5785    Note - despite the name this initialisation is not done when the frag
5786    is created, but only when its type is assigned.  A frag can be created
5787    and used a long time before its type is set, so beware of assuming that
5788    this initialisation is performed first.  */
5789 
5790 #ifndef OBJ_ELF
5791 void
5792 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5793 		   int max_chars ATTRIBUTE_UNUSED)
5794 {
5795 }
5796 
5797 #else /* OBJ_ELF is defined.  */
5798 void
5799 aarch64_init_frag (fragS * fragP, int max_chars)
5800 {
5801   /* Record a mapping symbol for alignment frags.  We will delete this
5802      later if the alignment ends up empty.  */
5803   if (!fragP->tc_frag_data.recorded)
5804     {
5805       fragP->tc_frag_data.recorded = 1;
5806       switch (fragP->fr_type)
5807 	{
5808 	case rs_align:
5809 	case rs_align_test:
5810 	case rs_fill:
5811 	  mapping_state_2 (MAP_DATA, max_chars);
5812 	  break;
5813 	case rs_align_code:
5814 	  mapping_state_2 (MAP_INSN, max_chars);
5815 	  break;
5816 	default:
5817 	  break;
5818 	}
5819     }
5820 }
5821 
5822 /* Initialize the DWARF-2 unwind information for this procedure.  */
5823 
5824 void
5825 tc_aarch64_frame_initial_instructions (void)
5826 {
5827   cfi_add_CFA_def_cfa (REG_SP, 0);
5828 }
5829 #endif /* OBJ_ELF */
5830 
5831 /* Convert REGNAME to a DWARF-2 register number.  */
5832 
5833 int
5834 tc_aarch64_regname_to_dw2regnum (char *regname)
5835 {
5836   const reg_entry *reg = parse_reg (&regname);
5837   if (reg == NULL)
5838     return -1;
5839 
5840   switch (reg->type)
5841     {
5842     case REG_TYPE_SP_32:
5843     case REG_TYPE_SP_64:
5844     case REG_TYPE_R_32:
5845     case REG_TYPE_R_64:
5846     case REG_TYPE_FP_B:
5847     case REG_TYPE_FP_H:
5848     case REG_TYPE_FP_S:
5849     case REG_TYPE_FP_D:
5850     case REG_TYPE_FP_Q:
5851       return reg->number;
5852     default:
5853       break;
5854     }
5855   return -1;
5856 }
5857 
5858 /* MD interface: Symbol and relocation handling.  */
5859 
5860 /* Return the address within the segment that a PC-relative fixup is
5861    relative to.  For AArch64, PC-relative fixups applied to instructions
5862    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
5863 
5864 long
5865 md_pcrel_from_section (fixS * fixP, segT seg)
5866 {
5867   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5868 
5869   /* If this is pc-relative and we are going to emit a relocation
5870      then we just want to put out any pipeline compensation that the linker
5871      will need.  Otherwise we want to use the calculated base.  */
5872   if (fixP->fx_pcrel
5873       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5874 	  || aarch64_force_relocation (fixP)))
5875     base = 0;
5876 
5877   /* AArch64 should be consistent for all pc-relative relocations.  */
5878   return base + AARCH64_PCREL_OFFSET;
5879 }
5880 
5881 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
5882    Otherwise we have no need to default the values of symbols.  */
5883 
5884 symbolS *
5885 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5886 {
5887 #ifdef OBJ_ELF
5888   if (name[0] == '_' && name[1] == 'G'
5889       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5890     {
5891       if (!GOT_symbol)
5892 	{
5893 	  if (symbol_find (name))
5894 	    as_bad (_("GOT already in the symbol table"));
5895 
5896 	  GOT_symbol = symbol_new (name, undefined_section,
5897 				   (valueT) 0, &zero_address_frag);
5898 	}
5899 
5900       return GOT_symbol;
5901     }
5902 #endif
5903 
5904   return 0;
5905 }
5906 
5907 /* Return non-zero if the indicated VALUE has overflowed the maximum
5908    range expressible by an unsigned number with the indicated number of
5909    BITS.  */
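/* For example, with BITS == 12 the representable range is 0..4095, so
   unsigned_overflow (4095, 12) is FALSE while unsigned_overflow (4096, 12)
   is TRUE.  */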
5910 
5911 static bfd_boolean
5912 unsigned_overflow (valueT value, unsigned bits)
5913 {
5914   valueT lim;
5915   if (bits >= sizeof (valueT) * 8)
5916     return FALSE;
5917   lim = (valueT) 1 << bits;
5918   return (value >= lim);
5919 }
5920 
5921 
5922 /* Return non-zero if the indicated VALUE has overflowed the maximum
5923    range expressible by a signed number with the indicated number of
5924    BITS.  */
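/* For example, with BITS == 9 the representable range is -256..255, which is
   the range of the 9-bit signed offset used by the unscaled load/store
   encodings handled further below.  */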
5925 
5926 static bfd_boolean
5927 signed_overflow (offsetT value, unsigned bits)
5928 {
5929   offsetT lim;
5930   if (bits >= sizeof (offsetT) * 8)
5931     return FALSE;
5932   lim = (offsetT) 1 << (bits - 1);
5933   return (value < -lim || value >= lim);
5934 }
5935 
5936 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5937    unsigned immediate offset load/store instruction, try to encode it as
5938    an unscaled, 9-bit, signed immediate offset load/store instruction.
5939    Return TRUE if it is successful; otherwise return FALSE.
5940 
5941    As a programmer-friendly assembler, we generate LDUR/STUR instructions
5942    in response to the standard LDR/STR mnemonics when the immediate offset is
5943    unambiguous, i.e. when it is negative or unaligned.  */
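/* For example (illustrative only):

     str w0, [x1, #-4]   cannot use the scaled, unsigned STR encoding because
			 the offset is negative, so it is assembled as
			 "stur w0, [x1, #-4]";
     ldr x0, [x1, #6]    cannot use the scaled LDR encoding because 6 is not
			 a multiple of 8, so it is assembled as
			 "ldur x0, [x1, #6]".  */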
5944 
5945 static bfd_boolean
5946 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
5947 {
5948   int idx;
5949   enum aarch64_op new_op;
5950   const aarch64_opcode *new_opcode;
5951 
5952   gas_assert (instr->opcode->iclass == ldst_pos);
5953 
5954   switch (instr->opcode->op)
5955     {
5956     case OP_LDRB_POS:new_op = OP_LDURB; break;
5957     case OP_STRB_POS: new_op = OP_STURB; break;
5958     case OP_LDRSB_POS: new_op = OP_LDURSB; break;
5959     case OP_LDRH_POS: new_op = OP_LDURH; break;
5960     case OP_STRH_POS: new_op = OP_STURH; break;
5961     case OP_LDRSH_POS: new_op = OP_LDURSH; break;
5962     case OP_LDR_POS: new_op = OP_LDUR; break;
5963     case OP_STR_POS: new_op = OP_STUR; break;
5964     case OP_LDRF_POS: new_op = OP_LDURV; break;
5965     case OP_STRF_POS: new_op = OP_STURV; break;
5966     case OP_LDRSW_POS: new_op = OP_LDURSW; break;
5967     case OP_PRFM_POS: new_op = OP_PRFUM; break;
5968     default: new_op = OP_NIL; break;
5969     }
5970 
5971   if (new_op == OP_NIL)
5972     return FALSE;
5973 
5974   new_opcode = aarch64_get_opcode (new_op);
5975   gas_assert (new_opcode != NULL);
5976 
5977   DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
5978 	       instr->opcode->op, new_opcode->op);
5979 
5980   aarch64_replace_opcode (instr, new_opcode);
5981 
5982   /* Clear up the ADDR_SIMM9's qualifier; otherwise the
5983      qualifier matching may fail because the out-of-date qualifier will
5984      prevent the operand being updated with a new and correct qualifier.  */
5985   idx = aarch64_operand_index (instr->opcode->operands,
5986 			       AARCH64_OPND_ADDR_SIMM9);
5987   gas_assert (idx == 1);
5988   instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
5989 
5990   DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
5991 
5992   if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
5993     return FALSE;
5994 
5995   return TRUE;
5996 }
5997 
5998 /* Called by fix_insn to fix a MOV immediate alias instruction.
5999 
6000    Operand for a generic move immediate instruction, which is an alias
6001    instruction that generates a single MOVZ, MOVN or ORR instruction to load
6002    a 32-bit/64-bit immediate value into a general register.  An assembler error
6003    shall result if the immediate cannot be created by a single one of these
6004    instructions.  If there is a choice, then to ensure reversibility an
6005    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
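/* For example (illustrative only):

     mov x0, #0x10000             -> movz x0, #0x1, lsl #16
     mov x0, #-1                  -> movn x0, #0x0
     mov x0, #0x5555555555555555  -> orr  x0, xzr, #0x5555555555555555

   whereas an immediate such as 0x12345 fits none of the three forms and is
   rejected with the diagnostic issued at the end of this function.  */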
6006 
6007 static void
6008 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6009 {
6010   const aarch64_opcode *opcode;
6011 
6012   /* Need to check if the destination is SP/ZR.  The check has to be done
6013      before any aarch64_replace_opcode.  */
6014   int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6015   int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6016 
6017   instr->operands[1].imm.value = value;
6018   instr->operands[1].skip = 0;
6019 
6020   if (try_mov_wide_p)
6021     {
6022       /* Try the MOVZ alias.  */
6023       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6024       aarch64_replace_opcode (instr, opcode);
6025       if (aarch64_opcode_encode (instr->opcode, instr,
6026 				 &instr->value, NULL, NULL))
6027 	{
6028 	  put_aarch64_insn (buf, instr->value);
6029 	  return;
6030 	}
6031       /* Try the MOVK alias.  */
6032       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6033       aarch64_replace_opcode (instr, opcode);
6034       if (aarch64_opcode_encode (instr->opcode, instr,
6035 				 &instr->value, NULL, NULL))
6036 	{
6037 	  put_aarch64_insn (buf, instr->value);
6038 	  return;
6039 	}
6040     }
6041 
6042   if (try_mov_bitmask_p)
6043     {
6044       /* Try the ORR alias.  */
6045       opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6046       aarch64_replace_opcode (instr, opcode);
6047       if (aarch64_opcode_encode (instr->opcode, instr,
6048 				 &instr->value, NULL, NULL))
6049 	{
6050 	  put_aarch64_insn (buf, instr->value);
6051 	  return;
6052 	}
6053     }
6054 
6055   as_bad_where (fixP->fx_file, fixP->fx_line,
6056 		_("immediate cannot be moved by a single instruction"));
6057 }
6058 
6059 /* An instruction operand which is immediate-related may have a symbol used
6060    in the assembly, e.g.
6061 
6062      mov     w0, u32
6063      .set    u32,    0x00ffff00
6064 
6065    At the time when the assembly instruction is parsed, a referenced symbol,
6066    like 'u32' in the above example, may not have been seen; a fixS is created
6067    in such a case and is handled here after symbols have been resolved.
6068    The instruction is fixed up with VALUE using the information in *FIXP plus
6069    extra information in FLAGS.
6070 
6071    This function is called by md_apply_fix to fix up instructions that need
6072    a fix-up described above but does not involve any linker-time relocation.  */
6073 
6074 static void
6075 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6076 {
6077   int idx;
6078   uint32_t insn;
6079   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6080   enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6081   aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6082 
6083   if (new_inst)
6084     {
6085       /* Now the instruction is about to be fixed-up, so the operand that
6086 	 was previously marked as 'ignored' needs to be unmarked in order
6087 	 to get the encoding done properly.  */
6088       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6089       new_inst->operands[idx].skip = 0;
6090     }
6091 
6092   gas_assert (opnd != AARCH64_OPND_NIL);
6093 
6094   switch (opnd)
6095     {
6096     case AARCH64_OPND_EXCEPTION:
6097       if (unsigned_overflow (value, 16))
6098 	as_bad_where (fixP->fx_file, fixP->fx_line,
6099 		      _("immediate out of range"));
6100       insn = get_aarch64_insn (buf);
6101       insn |= encode_svc_imm (value);
6102       put_aarch64_insn (buf, insn);
6103       break;
6104 
6105     case AARCH64_OPND_AIMM:
6106       /* ADD or SUB with immediate.
6107 	 NOTE this assumes we come here with an add/sub shifted reg encoding
6108 		  3  322|2222|2  2  2 21111 111111
6109 		  1  098|7654|3  2  1 09876 543210 98765 43210
6110 	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
6111 	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
6112 	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
6113 	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
6114 	 ->
6115 		  3  322|2222|2 2   221111111111
6116 		  1  098|7654|3 2   109876543210 98765 43210
6117 	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
6118 	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
6119 	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
6120 	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
6121 	 Fields sf Rn Rd are already set.  */
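      /* For example (illustrative only): a resolved value of -4 switches ADD
	 to SUB (or vice versa) and encodes #4; a value of 0x555000 does not
	 fit in 12 bits but, provided no explicit shift was given, it is a
	 multiple of 0x1000 and is therefore encoded as #0x555 with the
	 shift-by-12 flag set.  */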
6122       insn = get_aarch64_insn (buf);
6123       if (value < 0)
6124 	{
6125 	  /* Add <-> sub.  */
6126 	  insn = reencode_addsub_switch_add_sub (insn);
6127 	  value = -value;
6128 	}
6129 
6130       if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6131 	  && unsigned_overflow (value, 12))
6132 	{
6133 	  /* Try to shift the value by 12 to make it fit.  */
6134 	  if (((value >> 12) << 12) == value
6135 	      && ! unsigned_overflow (value, 12 + 12))
6136 	    {
6137 	      value >>= 12;
6138 	      insn |= encode_addsub_imm_shift_amount (1);
6139 	    }
6140 	}
6141 
6142       if (unsigned_overflow (value, 12))
6143 	as_bad_where (fixP->fx_file, fixP->fx_line,
6144 		      _("immediate out of range"));
6145 
6146       insn |= encode_addsub_imm (value);
6147 
6148       put_aarch64_insn (buf, insn);
6149       break;
6150 
6151     case AARCH64_OPND_SIMD_IMM:
6152     case AARCH64_OPND_SIMD_IMM_SFT:
6153     case AARCH64_OPND_LIMM:
6154       /* Bit mask immediate.  */
6155       gas_assert (new_inst != NULL);
6156       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6157       new_inst->operands[idx].imm.value = value;
6158       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6159 				 &new_inst->value, NULL, NULL))
6160 	put_aarch64_insn (buf, new_inst->value);
6161       else
6162 	as_bad_where (fixP->fx_file, fixP->fx_line,
6163 		      _("invalid immediate"));
6164       break;
6165 
6166     case AARCH64_OPND_HALF:
6167       /* 16-bit unsigned immediate.  */
6168       if (unsigned_overflow (value, 16))
6169 	as_bad_where (fixP->fx_file, fixP->fx_line,
6170 		      _("immediate out of range"));
6171       insn = get_aarch64_insn (buf);
6172       insn |= encode_movw_imm (value & 0xffff);
6173       put_aarch64_insn (buf, insn);
6174       break;
6175 
6176     case AARCH64_OPND_IMM_MOV:
6177       /* Operand for a generic move immediate instruction, which is
6178 	 an alias instruction that generates a single MOVZ, MOVN or ORR
6179 	 instruction to load a 32-bit/64-bit immediate value into a general
6180 	 register.  An assembler error shall result if the immediate cannot be
6181 	 created by a single one of these instructions.  If there is a choice,
6182 	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6183 	 and MOVZ or MOVN to ORR.  */
6184       gas_assert (new_inst != NULL);
6185       fix_mov_imm_insn (fixP, buf, new_inst, value);
6186       break;
6187 
6188     case AARCH64_OPND_ADDR_SIMM7:
6189     case AARCH64_OPND_ADDR_SIMM9:
6190     case AARCH64_OPND_ADDR_SIMM9_2:
6191     case AARCH64_OPND_ADDR_UIMM12:
6192       /* Immediate offset in an address.  */
6193       insn = get_aarch64_insn (buf);
6194 
6195       gas_assert (new_inst != NULL && new_inst->value == insn);
6196       gas_assert (new_inst->opcode->operands[1] == opnd
6197 		  || new_inst->opcode->operands[2] == opnd);
6198 
6199       /* Get the index of the address operand.  */
6200       if (new_inst->opcode->operands[1] == opnd)
6201 	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
6202 	idx = 1;
6203       else
6204 	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
6205 	idx = 2;
6206 
6207       /* Update the resolved offset value.  */
6208       new_inst->operands[idx].addr.offset.imm = value;
6209 
6210       /* Encode/fix-up.  */
6211       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6212 				 &new_inst->value, NULL, NULL))
6213 	{
6214 	  put_aarch64_insn (buf, new_inst->value);
6215 	  break;
6216 	}
6217       else if (new_inst->opcode->iclass == ldst_pos
6218 	       && try_to_encode_as_unscaled_ldst (new_inst))
6219 	{
6220 	  put_aarch64_insn (buf, new_inst->value);
6221 	  break;
6222 	}
6223 
6224       as_bad_where (fixP->fx_file, fixP->fx_line,
6225 		    _("immediate offset out of range"));
6226       break;
6227 
6228     default:
6229       gas_assert (0);
6230       as_fatal (_("unhandled operand code %d"), opnd);
6231     }
6232 }
6233 
6234 /* Apply a fixup (fixP) to segment data, once it has been determined
6235    by our caller that we have all the info we need to fix it up.
6236 
6237    Parameter valP is the pointer to the value of the bits.  */
6238 
6239 void
6240 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6241 {
6242   offsetT value = *valP;
6243   uint32_t insn;
6244   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6245   int scale;
6246   unsigned flags = fixP->fx_addnumber;
6247 
6248   DEBUG_TRACE ("\n\n");
6249   DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6250   DEBUG_TRACE ("Enter md_apply_fix");
6251 
6252   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6253 
6254   /* Note whether this will delete the relocation.  */
6255 
6256   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6257     fixP->fx_done = 1;
6258 
6259   /* Process the relocations.  */
6260   switch (fixP->fx_r_type)
6261     {
6262     case BFD_RELOC_NONE:
6263       /* This will need to go in the object file.  */
6264       fixP->fx_done = 0;
6265       break;
6266 
6267     case BFD_RELOC_8:
6268     case BFD_RELOC_8_PCREL:
6269       if (fixP->fx_done || !seg->use_rela_p)
6270 	md_number_to_chars (buf, value, 1);
6271       break;
6272 
6273     case BFD_RELOC_16:
6274     case BFD_RELOC_16_PCREL:
6275       if (fixP->fx_done || !seg->use_rela_p)
6276 	md_number_to_chars (buf, value, 2);
6277       break;
6278 
6279     case BFD_RELOC_32:
6280     case BFD_RELOC_32_PCREL:
6281       if (fixP->fx_done || !seg->use_rela_p)
6282 	md_number_to_chars (buf, value, 4);
6283       break;
6284 
6285     case BFD_RELOC_64:
6286     case BFD_RELOC_64_PCREL:
6287       if (fixP->fx_done || !seg->use_rela_p)
6288 	md_number_to_chars (buf, value, 8);
6289       break;
6290 
6291     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6292       /* We claim that these fixups have been processed here, even if
6293          in fact we generate an error because we do not have a reloc
6294          for them, so tc_gen_reloc() will reject them.  */
6295       fixP->fx_done = 1;
6296       if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6297 	{
6298 	  as_bad_where (fixP->fx_file, fixP->fx_line,
6299 			_("undefined symbol %s used as an immediate value"),
6300 			S_GET_NAME (fixP->fx_addsy));
6301 	  goto apply_fix_return;
6302 	}
6303       fix_insn (fixP, flags, value);
6304       break;
6305 
6306     case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6307       if (value & 3)
6308 	as_bad_where (fixP->fx_file, fixP->fx_line,
6309 		      _("pc-relative load offset not word aligned"));
6310       if (signed_overflow (value, 21))
6311 	as_bad_where (fixP->fx_file, fixP->fx_line,
6312 		      _("pc-relative load offset out of range"));
6313       if (fixP->fx_done || !seg->use_rela_p)
6314 	{
6315 	  insn = get_aarch64_insn (buf);
6316 	  insn |= encode_ld_lit_ofs_19 (value >> 2);
6317 	  put_aarch64_insn (buf, insn);
6318 	}
6319       break;
6320 
6321     case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6322       if (signed_overflow (value, 21))
6323 	as_bad_where (fixP->fx_file, fixP->fx_line,
6324 		      _("pc-relative address offset out of range"));
6325       if (fixP->fx_done || !seg->use_rela_p)
6326 	{
6327 	  insn = get_aarch64_insn (buf);
6328 	  insn |= encode_adr_imm (value);
6329 	  put_aarch64_insn (buf, insn);
6330 	}
6331       break;
6332 
6333     case BFD_RELOC_AARCH64_BRANCH19:
6334       if (value & 3)
6335 	as_bad_where (fixP->fx_file, fixP->fx_line,
6336 		      _("conditional branch target not word aligned"));
6337       if (signed_overflow (value, 21))
6338 	as_bad_where (fixP->fx_file, fixP->fx_line,
6339 		      _("conditional branch out of range"));
6340       if (fixP->fx_done || !seg->use_rela_p)
6341 	{
6342 	  insn = get_aarch64_insn (buf);
6343 	  insn |= encode_cond_branch_ofs_19 (value >> 2);
6344 	  put_aarch64_insn (buf, insn);
6345 	}
6346       break;
6347 
6348     case BFD_RELOC_AARCH64_TSTBR14:
6349       if (value & 3)
6350 	as_bad_where (fixP->fx_file, fixP->fx_line,
6351 		      _("conditional branch target not word aligned"));
6352       if (signed_overflow (value, 16))
6353 	as_bad_where (fixP->fx_file, fixP->fx_line,
6354 		      _("conditional branch out of range"));
6355       if (fixP->fx_done || !seg->use_rela_p)
6356 	{
6357 	  insn = get_aarch64_insn (buf);
6358 	  insn |= encode_tst_branch_ofs_14 (value >> 2);
6359 	  put_aarch64_insn (buf, insn);
6360 	}
6361       break;
6362 
6363     case BFD_RELOC_AARCH64_JUMP26:
6364     case BFD_RELOC_AARCH64_CALL26:
6365       if (value & 3)
6366 	as_bad_where (fixP->fx_file, fixP->fx_line,
6367 		      _("branch target not word aligned"));
6368       if (signed_overflow (value, 28))
6369 	as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
6370       if (fixP->fx_done || !seg->use_rela_p)
6371 	{
6372 	  insn = get_aarch64_insn (buf);
6373 	  insn |= encode_branch_ofs_26 (value >> 2);
6374 	  put_aarch64_insn (buf, insn);
6375 	}
6376       break;
6377 
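    /* The MOVW group relocations below insert a 16-bit slice of the value
       into a MOVN/MOVZ/MOVK instruction; SCALE selects the slice.  For
       example (illustrative only), when the fixup is resolved here with a
       value of 0x0000123456789abc, G0 inserts 0x9abc, G1 inserts 0x5678,
       G2 inserts 0x1234 and G3 inserts 0x0000.  */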
6378     case BFD_RELOC_AARCH64_MOVW_G0:
6379     case BFD_RELOC_AARCH64_MOVW_G0_S:
6380     case BFD_RELOC_AARCH64_MOVW_G0_NC:
6381       scale = 0;
6382       goto movw_common;
6383     case BFD_RELOC_AARCH64_MOVW_G1:
6384     case BFD_RELOC_AARCH64_MOVW_G1_S:
6385     case BFD_RELOC_AARCH64_MOVW_G1_NC:
6386       scale = 16;
6387       goto movw_common;
6388     case BFD_RELOC_AARCH64_MOVW_G2:
6389     case BFD_RELOC_AARCH64_MOVW_G2_S:
6390     case BFD_RELOC_AARCH64_MOVW_G2_NC:
6391       scale = 32;
6392       goto movw_common;
6393     case BFD_RELOC_AARCH64_MOVW_G3:
6394       scale = 48;
6395     movw_common:
6396       if (fixP->fx_done || !seg->use_rela_p)
6397 	{
6398 	  insn = get_aarch64_insn (buf);
6399 
6400 	  if (!fixP->fx_done)
6401 	    {
6402 	      /* REL signed addend must fit in 16 bits */
6403 	      if (signed_overflow (value, 16))
6404 		as_bad_where (fixP->fx_file, fixP->fx_line,
6405 			      _("offset out of range"));
6406 	    }
6407 	  else
6408 	    {
6409 	      /* Check for overflow and scale. */
6410 	      switch (fixP->fx_r_type)
6411 		{
6412 		case BFD_RELOC_AARCH64_MOVW_G0:
6413 		case BFD_RELOC_AARCH64_MOVW_G1:
6414 		case BFD_RELOC_AARCH64_MOVW_G2:
6415 		case BFD_RELOC_AARCH64_MOVW_G3:
6416 		  if (unsigned_overflow (value, scale + 16))
6417 		    as_bad_where (fixP->fx_file, fixP->fx_line,
6418 				  _("unsigned value out of range"));
6419 		  break;
6420 		case BFD_RELOC_AARCH64_MOVW_G0_S:
6421 		case BFD_RELOC_AARCH64_MOVW_G1_S:
6422 		case BFD_RELOC_AARCH64_MOVW_G2_S:
6423 		  /* NOTE: We can only come here with movz or movn. */
6424 		  if (signed_overflow (value, scale + 16))
6425 		    as_bad_where (fixP->fx_file, fixP->fx_line,
6426 				  _("signed value out of range"));
6427 		  if (value < 0)
6428 		    {
6429 		      /* Force use of MOVN.  */
6430 		      value = ~value;
6431 		      insn = reencode_movzn_to_movn (insn);
6432 		    }
6433 		  else
6434 		    {
6435 		      /* Force use of MOVZ.  */
6436 		      insn = reencode_movzn_to_movz (insn);
6437 		    }
6438 		  break;
6439 		default:
6440 		  /* Unchecked relocations.  */
6441 		  break;
6442 		}
6443 	      value >>= scale;
6444 	    }
6445 
6446 	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
6447 	  insn |= encode_movw_imm (value & 0xffff);
6448 
6449 	  put_aarch64_insn (buf, insn);
6450 	}
6451       break;
6452 
6453     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6454     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6455     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6456     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6457     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6458     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6459     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6460     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6461     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6462     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6463     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6464     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6465     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6466     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6467     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6468       S_SET_THREAD_LOCAL (fixP->fx_addsy);
6469       /* Should always be exported to the object file, see
6470 	 aarch64_force_relocation().  */
6471       gas_assert (!fixP->fx_done);
6472       gas_assert (seg->use_rela_p);
6473       break;
6474 
6475     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6476     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6477     case BFD_RELOC_AARCH64_ADD_LO12:
6478     case BFD_RELOC_AARCH64_LDST8_LO12:
6479     case BFD_RELOC_AARCH64_LDST16_LO12:
6480     case BFD_RELOC_AARCH64_LDST32_LO12:
6481     case BFD_RELOC_AARCH64_LDST64_LO12:
6482     case BFD_RELOC_AARCH64_LDST128_LO12:
6483     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6484     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6485       /* Should always be exported to the object file, see
6486 	 aarch64_force_relocation().  */
6487       gas_assert (!fixP->fx_done);
6488       gas_assert (seg->use_rela_p);
6489       break;
6490 
6491     case BFD_RELOC_AARCH64_TLSDESC_ADD:
6492     case BFD_RELOC_AARCH64_TLSDESC_LDR:
6493     case BFD_RELOC_AARCH64_TLSDESC_CALL:
6494       break;
6495 
6496     default:
6497       as_bad_where (fixP->fx_file, fixP->fx_line,
6498 		    _("unexpected %s fixup"),
6499 		    bfd_get_reloc_code_name (fixP->fx_r_type));
6500       break;
6501     }
6502 
6503 apply_fix_return:
6504   /* Free the allocated struct aarch64_inst.
6505      N.B. currently only a very limited number of fix-up types actually use
6506      this field, so the impact on performance should be minimal.  */
6507   if (fixP->tc_fix_data.inst != NULL)
6508     free (fixP->tc_fix_data.inst);
6509 
6510   return;
6511 }
6512 
6513 /* Translate internal representation of relocation info to BFD target
6514    format.  */
6515 
6516 arelent *
6517 tc_gen_reloc (asection * section, fixS * fixp)
6518 {
6519   arelent *reloc;
6520   bfd_reloc_code_real_type code;
6521 
6522   reloc = xmalloc (sizeof (arelent));
6523 
6524   reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6525   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6526   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6527 
6528   if (fixp->fx_pcrel)
6529     {
6530       if (section->use_rela_p)
6531 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6532       else
6533 	fixp->fx_offset = reloc->address;
6534     }
6535   reloc->addend = fixp->fx_offset;
6536 
6537   code = fixp->fx_r_type;
6538   switch (code)
6539     {
6540     case BFD_RELOC_16:
6541       if (fixp->fx_pcrel)
6542 	code = BFD_RELOC_16_PCREL;
6543       break;
6544 
6545     case BFD_RELOC_32:
6546       if (fixp->fx_pcrel)
6547 	code = BFD_RELOC_32_PCREL;
6548       break;
6549 
6550     case BFD_RELOC_64:
6551       if (fixp->fx_pcrel)
6552 	code = BFD_RELOC_64_PCREL;
6553       break;
6554 
6555     default:
6556       break;
6557     }
6558 
6559   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6560   if (reloc->howto == NULL)
6561     {
6562       as_bad_where (fixp->fx_file, fixp->fx_line,
6563 		    _
6564 		    ("cannot represent %s relocation in this object file format"),
6565 		    bfd_get_reloc_code_name (code));
6566       return NULL;
6567     }
6568 
6569   return reloc;
6570 }
6571 
6572 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
6573 
6574 void
6575 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6576 {
6577   bfd_reloc_code_real_type type;
6578   int pcrel = 0;
6579 
6580   /* Pick a reloc.
6581      FIXME: @@ Should look at CPU word size.  */
6582   switch (size)
6583     {
6584     case 1:
6585       type = BFD_RELOC_8;
6586       break;
6587     case 2:
6588       type = BFD_RELOC_16;
6589       break;
6590     case 4:
6591       type = BFD_RELOC_32;
6592       break;
6593     case 8:
6594       type = BFD_RELOC_64;
6595       break;
6596     default:
6597       as_bad (_("cannot do %u-byte relocation"), size);
6598       type = BFD_RELOC_UNUSED;
6599       break;
6600     }
6601 
6602   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6603 }
6604 
6605 int
6606 aarch64_force_relocation (struct fix *fixp)
6607 {
6608   switch (fixp->fx_r_type)
6609     {
6610     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6611       /* Perform these "immediate" internal relocations
6612          even if the symbol is extern or weak.  */
6613       return 0;
6614 
6615     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6616     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6617     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6618     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6619     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6620     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6621     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6622     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6623     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6624     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6625     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6626     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6627     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6628     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6629     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6630     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6631     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6632     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6633     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6634     case BFD_RELOC_AARCH64_ADD_LO12:
6635     case BFD_RELOC_AARCH64_LDST8_LO12:
6636     case BFD_RELOC_AARCH64_LDST16_LO12:
6637     case BFD_RELOC_AARCH64_LDST32_LO12:
6638     case BFD_RELOC_AARCH64_LDST64_LO12:
6639     case BFD_RELOC_AARCH64_LDST128_LO12:
6640       /* Always leave these relocations for the linker.  */
6641       return 1;
6642 
6643     default:
6644       break;
6645     }
6646 
6647   return generic_force_reloc (fixp);
6648 }
6649 
6650 #ifdef OBJ_ELF
6651 
6652 const char *
6653 elf64_aarch64_target_format (void)
6654 {
6655   if (target_big_endian)
6656     return "elf64-bigaarch64";
6657   else
6658     return "elf64-littleaarch64";
6659 }
6660 
6661 void
6662 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6663 {
6664   elf_frob_symbol (symp, puntp);
6665 }
6666 #endif
6667 
6668 /* MD interface: Finalization.	*/
6669 
6670 /* A good place to do this, although this was probably not intended
6671    for this kind of use.  We need to dump the literal pool before
6672    references are made to a null symbol pointer.  */
6673 
6674 void
6675 aarch64_cleanup (void)
6676 {
6677   literal_pool *pool;
6678 
6679   for (pool = list_of_pools; pool; pool = pool->next)
6680     {
6681       /* Put it at the end of the relevant section.  */
6682       subseg_set (pool->section, pool->sub_section);
6683       s_ltorg (0);
6684     }
6685 }
6686 
6687 #ifdef OBJ_ELF
6688 /* Remove any excess mapping symbols generated for alignment frags in
6689    SEC.  We may have created a mapping symbol before a zero byte
6690    alignment; remove it if there's a mapping symbol after the
6691    alignment.  */
6692 static void
6693 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6694 		       void *dummy ATTRIBUTE_UNUSED)
6695 {
6696   segment_info_type *seginfo = seg_info (sec);
6697   fragS *fragp;
6698 
6699   if (seginfo == NULL || seginfo->frchainP == NULL)
6700     return;
6701 
6702   for (fragp = seginfo->frchainP->frch_root;
6703        fragp != NULL; fragp = fragp->fr_next)
6704     {
6705       symbolS *sym = fragp->tc_frag_data.last_map;
6706       fragS *next = fragp->fr_next;
6707 
6708       /* Variable-sized frags have been converted to fixed size by
6709          this point.  But if this was variable-sized to start with,
6710          there will be a fixed-size frag after it.  So don't handle
6711          next == NULL.  */
6712       if (sym == NULL || next == NULL)
6713 	continue;
6714 
6715       if (S_GET_VALUE (sym) < next->fr_address)
6716 	/* Not at the end of this frag.  */
6717 	continue;
6718       know (S_GET_VALUE (sym) == next->fr_address);
6719 
6720       do
6721 	{
6722 	  if (next->tc_frag_data.first_map != NULL)
6723 	    {
6724 	      /* Next frag starts with a mapping symbol.  Discard this
6725 	         one.  */
6726 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6727 	      break;
6728 	    }
6729 
6730 	  if (next->fr_next == NULL)
6731 	    {
6732 	      /* This mapping symbol is at the end of the section.  Discard
6733 	         it.  */
6734 	      know (next->fr_fix == 0 && next->fr_var == 0);
6735 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6736 	      break;
6737 	    }
6738 
6739 	  /* As long as we have empty frags without any mapping symbols,
6740 	     keep looking.  */
6741 	  /* If the next frag is non-empty and does not start with a
6742 	     mapping symbol, then this mapping symbol is required.  */
6743 	  if (next->fr_address != next->fr_next->fr_address)
6744 	    break;
6745 
6746 	  next = next->fr_next;
6747 	}
6748       while (next != NULL);
6749     }
6750 }
6751 #endif
6752 
6753 /* Adjust the symbol table.  */
6754 
6755 void
6756 aarch64_adjust_symtab (void)
6757 {
6758 #ifdef OBJ_ELF
6759   /* Remove any overlapping mapping symbols generated by alignment frags.  */
6760   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6761   /* Now do generic ELF adjustments.  */
6762   elf_adjust_symtab ();
6763 #endif
6764 }
6765 
6766 static void
6767 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6768 {
6769   const char *hash_err;
6770 
6771   hash_err = hash_insert (table, key, value);
6772   if (hash_err)
6773     printf ("Internal Error:  Can't hash %s\n", key);
6774 }
6775 
6776 static void
6777 fill_instruction_hash_table (void)
6778 {
6779   aarch64_opcode *opcode = aarch64_opcode_table;
6780 
6781   while (opcode->name != NULL)
6782     {
6783       templates *templ, *new_templ;
6784       templ = hash_find (aarch64_ops_hsh, opcode->name);
6785 
6786       new_templ = (templates *) xmalloc (sizeof (templates));
6787       new_templ->opcode = opcode;
6788       new_templ->next = NULL;
6789 
6790       if (!templ)
6791 	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6792       else
6793 	{
6794 	  new_templ->next = templ->next;
6795 	  templ->next = new_templ;
6796 	}
6797       ++opcode;
6798     }
6799 }
6800 
6801 static inline void
6802 convert_to_upper (char *dst, const char *src, size_t num)
6803 {
6804   unsigned int i;
6805   for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6806     *dst = TOUPPER (*src);
6807   *dst = '\0';
6808 }
6809 
6810 /* Assume STR points to a lower-case string; allocate, convert and return
6811    the corresponding upper-case string.  */
6812 static inline const char*
6813 get_upper_str (const char *str)
6814 {
6815   char *ret;
6816   size_t len = strlen (str);
6817   if ((ret = xmalloc (len + 1)) == NULL)
6818     abort ();
6819   convert_to_upper (ret, str, len);
6820   return ret;
6821 }
6822 
6823 /* MD interface: Initialization.  */
6824 
6825 void
6826 md_begin (void)
6827 {
6828   unsigned mach;
6829   unsigned int i;
6830 
6831   if ((aarch64_ops_hsh = hash_new ()) == NULL
6832       || (aarch64_cond_hsh = hash_new ()) == NULL
6833       || (aarch64_shift_hsh = hash_new ()) == NULL
6834       || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6835       || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6836       || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6837       || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6838       || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6839       || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6840       || (aarch64_reg_hsh = hash_new ()) == NULL
6841       || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6842       || (aarch64_nzcv_hsh = hash_new ()) == NULL
6843       || (aarch64_pldop_hsh = hash_new ()) == NULL)
6844     as_fatal (_("virtual memory exhausted"));
6845 
6846   fill_instruction_hash_table ();
6847 
6848   for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6849     checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6850 			 (void *) (aarch64_sys_regs + i));
6851 
6852   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6853     checked_hash_insert (aarch64_pstatefield_hsh,
6854 			 aarch64_pstatefields[i].name,
6855 			 (void *) (aarch64_pstatefields + i));
6856 
6857   for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6858     checked_hash_insert (aarch64_sys_regs_ic_hsh,
6859 			 aarch64_sys_regs_ic[i].template,
6860 			 (void *) (aarch64_sys_regs_ic + i));
6861 
6862   for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6863     checked_hash_insert (aarch64_sys_regs_dc_hsh,
6864 			 aarch64_sys_regs_dc[i].template,
6865 			 (void *) (aarch64_sys_regs_dc + i));
6866 
6867   for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6868     checked_hash_insert (aarch64_sys_regs_at_hsh,
6869 			 aarch64_sys_regs_at[i].template,
6870 			 (void *) (aarch64_sys_regs_at + i));
6871 
6872   for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6873     checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6874 			 aarch64_sys_regs_tlbi[i].template,
6875 			 (void *) (aarch64_sys_regs_tlbi + i));
6876 
6877   for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6878     checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6879 			 (void *) (reg_names + i));
6880 
6881   for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6882     checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6883 			 (void *) (nzcv_names + i));
6884 
6885   for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6886     {
6887       const char *name = aarch64_operand_modifiers[i].name;
6888       checked_hash_insert (aarch64_shift_hsh, name,
6889 			   (void *) (aarch64_operand_modifiers + i));
6890       /* Also hash the name in the upper case.  */
6891       checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6892 			   (void *) (aarch64_operand_modifiers + i));
6893     }
6894 
6895   for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6896     {
6897       unsigned int j;
6898       /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6899 	 the same condition code.  */
6900       for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6901 	{
6902 	  const char *name = aarch64_conds[i].names[j];
6903 	  if (name == NULL)
6904 	    break;
6905 	  checked_hash_insert (aarch64_cond_hsh, name,
6906 			       (void *) (aarch64_conds + i));
6907 	  /* Also hash the name in the upper case.  */
6908 	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6909 			       (void *) (aarch64_conds + i));
6910 	}
6911     }
6912 
6913   for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6914     {
6915       const char *name = aarch64_barrier_options[i].name;
6916       /* Skip xx00 - the unallocated values of the option field.  */
6917       if ((i & 0x3) == 0)
6918 	continue;
6919       checked_hash_insert (aarch64_barrier_opt_hsh, name,
6920 			   (void *) (aarch64_barrier_options + i));
6921       /* Also hash the name in the upper case.  */
6922       checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6923 			   (void *) (aarch64_barrier_options + i));
6924     }
6925 
6926   for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6927     {
6928       const char* name = aarch64_prfops[i].name;
6929       /* Skip the unallocated hint encodings.  */
6930       if (name == NULL)
6931 	continue;
6932       checked_hash_insert (aarch64_pldop_hsh, name,
6933 			   (void *) (aarch64_prfops + i));
6934       /* Also hash the name in the upper case.  */
6935       checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6936 			   (void *) (aarch64_prfops + i));
6937     }
6938 
6939   /* Set the cpu variant based on the command-line options.  */
6940   if (!mcpu_cpu_opt)
6941     mcpu_cpu_opt = march_cpu_opt;
6942 
6943   if (!mcpu_cpu_opt)
6944     mcpu_cpu_opt = &cpu_default;
6945 
6946   cpu_variant = *mcpu_cpu_opt;
6947 
6948   /* Record the CPU type.  */
6949   mach = bfd_mach_aarch64;
6950 
6951   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
6952 }
6953 
6954 /* Command line processing.  */
6955 
6956 const char *md_shortopts = "m:";
6957 
6958 #ifdef AARCH64_BI_ENDIAN
6959 #define OPTION_EB (OPTION_MD_BASE + 0)
6960 #define OPTION_EL (OPTION_MD_BASE + 1)
6961 #else
6962 #if TARGET_BYTES_BIG_ENDIAN
6963 #define OPTION_EB (OPTION_MD_BASE + 0)
6964 #else
6965 #define OPTION_EL (OPTION_MD_BASE + 1)
6966 #endif
6967 #endif
6968 
6969 struct option md_longopts[] = {
6970 #ifdef OPTION_EB
6971   {"EB", no_argument, NULL, OPTION_EB},
6972 #endif
6973 #ifdef OPTION_EL
6974   {"EL", no_argument, NULL, OPTION_EL},
6975 #endif
6976   {NULL, no_argument, NULL, 0}
6977 };
6978 
6979 size_t md_longopts_size = sizeof (md_longopts);
6980 
6981 struct aarch64_option_table
6982 {
6983   char *option;			/* Option name to match.  */
6984   char *help;			/* Help information.  */
6985   int *var;			/* Variable to change.  */
6986   int value;			/* What to change it to.  */
6987   char *deprecated;		/* If non-null, print this message.  */
6988 };
6989 
6990 static struct aarch64_option_table aarch64_opts[] = {
6991   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
6992   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
6993    NULL},
6994 #ifdef DEBUG_AARCH64
6995   {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
6996 #endif /* DEBUG_AARCH64 */
6997   {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
6998    NULL},
6999   {NULL, NULL, NULL, 0, NULL}
7000 };
7001 
7002 struct aarch64_cpu_option_table
7003 {
7004   char *name;
7005   const aarch64_feature_set value;
7006   /* The canonical name of the CPU, or NULL to use NAME converted to upper
7007      case.  */
7008   const char *canonical_name;
7009 };
7010 
7011 /* This list should, at a minimum, contain all the cpu names
7012    recognized by GCC.  */
7013 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7014   {"all", AARCH64_ANY, NULL},
7015   {"generic", AARCH64_ARCH_V8, NULL},
7016 
7017   /* These two are example CPUs supported in GCC, once we have real
7018      CPUs they will be removed.  */
7019   {"example-1",	AARCH64_ARCH_V8, NULL},
7020   {"example-2",	AARCH64_ARCH_V8, NULL},
7021 
7022   {NULL, AARCH64_ARCH_NONE, NULL}
7023 };
7024 
7025 struct aarch64_arch_option_table
7026 {
7027   char *name;
7028   const aarch64_feature_set value;
7029 };
7030 
7031 /* This list should, at a minimum, contain all the architecture names
7032    recognized by GCC.  */
7033 static const struct aarch64_arch_option_table aarch64_archs[] = {
7034   {"all", AARCH64_ANY},
7035   {"armv8-a", AARCH64_ARCH_V8},
7036   {NULL, AARCH64_ARCH_NONE}
7037 };
7038 
7039 /* ISA extensions.  */
7040 struct aarch64_option_cpu_value_table
7041 {
7042   char *name;
7043   const aarch64_feature_set value;
7044 };
7045 
7046 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7047   {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7048   {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7049   {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7050   {NULL,		AARCH64_ARCH_NONE}
7051 };
7052 
7053 struct aarch64_long_option_table
7054 {
7055   char *option;			/* Substring to match.  */
7056   char *help;			/* Help information.  */
7057   int (*func) (char *subopt);	/* Function to decode sub-option.  */
7058   char *deprecated;		/* If non-null, print this message.  */
7059 };
7060 
7061 static int
7062 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7063 {
7064   /* We insist on extensions being added before being removed.  We achieve
7065      this by using the ADDING_VALUE variable to indicate whether we are
7066      adding an extension (1) or removing it (0) and only allowing it to
7067      change in the order -1 -> 1 -> 0.  */
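  /* For example (illustrative only), "+crypto+nosimd" first adds the crypto
     extension and then removes simd, whereas "+nosimd+crypto" is rejected
     because an extension would be added after one has been removed.  */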
7068   int adding_value = -1;
7069   aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7070 
7071   /* Copy the feature set, so that we can modify it.  */
7072   *ext_set = **opt_p;
7073   *opt_p = ext_set;
7074 
7075   while (str != NULL && *str != 0)
7076     {
7077       const struct aarch64_option_cpu_value_table *opt;
7078       char *ext;
7079       int optlen;
7080 
7081       if (*str != '+')
7082 	{
7083 	  as_bad (_("invalid architectural extension"));
7084 	  return 0;
7085 	}
7086 
7087       str++;
7088       ext = strchr (str, '+');
7089 
7090       if (ext != NULL)
7091 	optlen = ext - str;
7092       else
7093 	optlen = strlen (str);
7094 
7095       if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7096 	{
7097 	  if (adding_value != 0)
7098 	    adding_value = 0;
7099 	  optlen -= 2;
7100 	  str += 2;
7101 	}
7102       else if (optlen > 0)
7103 	{
7104 	  if (adding_value == -1)
7105 	    adding_value = 1;
7106 	  else if (adding_value != 1)
7107 	    {
7108 	      as_bad (_("must specify extensions to add before specifying "
7109 			"those to remove"));
7110 	      return 0;
7111 	    }
7112 	}
7113 
7114       if (optlen == 0)
7115 	{
7116 	  as_bad (_("missing architectural extension"));
7117 	  return 0;
7118 	}
7119 
7120       gas_assert (adding_value != -1);
7121 
7122       for (opt = aarch64_features; opt->name != NULL; opt++)
7123 	if (strlen (opt->name) == optlen
	    && strncmp (opt->name, str, optlen) == 0)
7124 	  {
7125 	    /* Add or remove the extension.  */
7126 	    if (adding_value)
7127 	      AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7128 	    else
7129 	      AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7130 	    break;
7131 	  }
7132 
7133       if (opt->name == NULL)
7134 	{
7135 	  as_bad (_("unknown architectural extension `%s'"), str);
7136 	  return 0;
7137 	}
7138 
7139       str = ext;
7140     }
7141 
7142   return 1;
7143 }
7144 
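/* Handle the -mcpu=<cpu>[+extension...] command-line option.
   Return 1 on success, 0 on failure.  */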
7145 static int
7146 aarch64_parse_cpu (char *str)
7147 {
7148   const struct aarch64_cpu_option_table *opt;
7149   char *ext = strchr (str, '+');
7150   size_t optlen;
7151 
7152   if (ext != NULL)
7153     optlen = ext - str;
7154   else
7155     optlen = strlen (str);
7156 
7157   if (optlen == 0)
7158     {
7159       as_bad (_("missing cpu name `%s'"), str);
7160       return 0;
7161     }
7162 
7163   for (opt = aarch64_cpus; opt->name != NULL; opt++)
7164     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7165       {
7166 	mcpu_cpu_opt = &opt->value;
7167 	if (ext != NULL)
7168 	  return aarch64_parse_features (ext, &mcpu_cpu_opt);
7169 
7170 	return 1;
7171       }
7172 
7173   as_bad (_("unknown cpu `%s'"), str);
7174   return 0;
7175 }
7176 
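/* Handle the -march=<arch>[+extension...] command-line option.
   Return 1 on success, 0 on failure.  */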
7177 static int
7178 aarch64_parse_arch (char *str)
7179 {
7180   const struct aarch64_arch_option_table *opt;
7181   char *ext = strchr (str, '+');
7182   size_t optlen;
7183 
7184   if (ext != NULL)
7185     optlen = ext - str;
7186   else
7187     optlen = strlen (str);
7188 
7189   if (optlen == 0)
7190     {
7191       as_bad (_("missing architecture name `%s'"), str);
7192       return 0;
7193     }
7194 
7195   for (opt = aarch64_archs; opt->name != NULL; opt++)
7196     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7197       {
7198 	march_cpu_opt = &opt->value;
7199 	if (ext != NULL)
7200 	  return aarch64_parse_features (ext, &march_cpu_opt);
7201 
7202 	return 1;
7203       }
7204 
7205   as_bad (_("unknown architecture `%s'"), str);
7206   return 0;
7207 }
7208 
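/* Long options that take an argument; FUNC is called with the text that
   follows the option prefix (for example, the CPU name after "-mcpu=").  */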
7209 static struct aarch64_long_option_table aarch64_long_opts[] = {
7210   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
7211    aarch64_parse_cpu, NULL},
7212   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
7213    aarch64_parse_arch, NULL},
7214   {NULL, NULL, 0, NULL}
7215 };
7216 
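/* Handle the target-specific command-line option C with argument ARG.
   Return non-zero if the option was recognized, zero otherwise.  */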
7217 int
7218 md_parse_option (int c, char *arg)
7219 {
7220   struct aarch64_option_table *opt;
7221   struct aarch64_long_option_table *lopt;
7222 
7223   switch (c)
7224     {
7225 #ifdef OPTION_EB
7226     case OPTION_EB:
7227       target_big_endian = 1;
7228       break;
7229 #endif
7230 
7231 #ifdef OPTION_EL
7232     case OPTION_EL:
7233       target_big_endian = 0;
7234       break;
7235 #endif
7236 
7237     case 'a':
7238       /* Listing option.  Just ignore these; we don't support additional
7239          ones.  */
7240       return 0;
7241 
7242     default:
7243       for (opt = aarch64_opts; opt->option != NULL; opt++)
7244 	{
7245 	  if (c == opt->option[0]
7246 	      && ((arg == NULL && opt->option[1] == 0)
7247 		  || streq (arg, opt->option + 1)))
7248 	    {
7249 	      /* If the option is deprecated, tell the user.  */
7250 	      if (opt->deprecated != NULL)
7251 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7252 			   arg ? arg : "", _(opt->deprecated));
7253 
7254 	      if (opt->var != NULL)
7255 		*opt->var = opt->value;
7256 
7257 	      return 1;
7258 	    }
7259 	}
7260 
7261       for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7262 	{
7263 	  /* These options are expected to have an argument.  */
7264 	  if (c == lopt->option[0]
7265 	      && arg != NULL
7266 	      && strncmp (arg, lopt->option + 1,
7267 			  strlen (lopt->option + 1)) == 0)
7268 	    {
7269 	      /* If the option is deprecated, tell the user.  */
7270 	      if (lopt->deprecated != NULL)
7271 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7272 			   _(lopt->deprecated));
7273 
7274 	      /* Call the sub-option parser.  */
7275 	      return lopt->func (arg + strlen (lopt->option) - 1);
7276 	    }
7277 	}
7278 
7279       return 0;
7280     }
7281 
7282   return 1;
7283 }
7284 
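/* Print a summary of the AArch64-specific command-line options to FP.  */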
7285 void
7286 md_show_usage (FILE * fp)
7287 {
7288   struct aarch64_option_table *opt;
7289   struct aarch64_long_option_table *lopt;
7290 
7291   fprintf (fp, _(" AArch64-specific assembler options:\n"));
7292 
7293   for (opt = aarch64_opts; opt->option != NULL; opt++)
7294     if (opt->help != NULL)
7295       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
7296 
7297   for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7298     if (lopt->help != NULL)
7299       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
7300 
7301 #ifdef OPTION_EB
7302   fprintf (fp, _("\
7303   -EB                     assemble code for a big-endian cpu\n"));
7304 #endif
7305 
7306 #ifdef OPTION_EL
7307   fprintf (fp, _("\
7308   -EL                     assemble code for a little-endian cpu\n"));
7309 #endif
7310 }
7311 
7312 /* Parse a .cpu directive.  */
7313 
7314 static void
7315 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7316 {
7317   const struct aarch64_cpu_option_table *opt;
7318   char saved_char;
7319   char *name;
7320   char *ext;
7321   size_t optlen;
7322 
7323   name = input_line_pointer;
7324   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7325     input_line_pointer++;
7326   saved_char = *input_line_pointer;
7327   *input_line_pointer = 0;
7328 
7329   ext = strchr (name, '+');
7330 
7331   if (ext != NULL)
7332     optlen = ext - name;
7333   else
7334     optlen = strlen (name);
7335 
7336   /* Skip the first "all" entry.  */
7337   for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7338     if (strlen (opt->name) == optlen
7339 	&& strncmp (name, opt->name, optlen) == 0)
7340       {
7341 	mcpu_cpu_opt = &opt->value;
7342 	if (ext != NULL)
7343 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7344 	    return;
7345 
7346 	cpu_variant = *mcpu_cpu_opt;
7347 
7348 	*input_line_pointer = saved_char;
7349 	demand_empty_rest_of_line ();
7350 	return;
7351       }
7352   as_bad (_("unknown cpu `%s'"), name);
7353   *input_line_pointer = saved_char;
7354   ignore_rest_of_line ();
7355 }
7356 
7357 
7358 /* Parse a .arch directive.  */
7359 
7360 static void
7361 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7362 {
7363   const struct aarch64_arch_option_table *opt;
7364   char saved_char;
7365   char *name;
7366   char *ext;
7367   size_t optlen;
7368 
7369   name = input_line_pointer;
7370   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7371     input_line_pointer++;
7372   saved_char = *input_line_pointer;
7373   *input_line_pointer = 0;
7374 
7375   ext = strchr (name, '+');
7376 
7377   if (ext != NULL)
7378     optlen = ext - name;
7379   else
7380     optlen = strlen (name);
7381 
7382   /* Skip the first "all" entry.  */
7383   for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7384     if (strlen (opt->name) == optlen
7385 	&& strncmp (name, opt->name, optlen) == 0)
7386       {
7387 	mcpu_cpu_opt = &opt->value;
7388 	if (ext != NULL)
7389 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7390 	    return;
7391 
7392 	cpu_variant = *mcpu_cpu_opt;
7393 
7394 	*input_line_pointer = saved_char;
7395 	demand_empty_rest_of_line ();
7396 	return;
7397       }
7398 
7399   as_bad (_("unknown architecture `%s'"), name);
7400   *input_line_pointer = saved_char;
7401   ignore_rest_of_line ();
7402 }
7403 
7404 /* Copy symbol information.  */
7405 
7406 void
7407 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7408 {
7409   AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7410 }
7411