xref: /netbsd-src/external/gpl3/binutils/dist/gas/config/tc-arm.c (revision 49d8c9ecf4abd21261269266ef64939f71b3cd09)
1 /* tc-arm.c -- Assemble for the ARM
2    Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3    2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
4    Free Software Foundation, Inc.
5    Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 	Modified by David Taylor (dtaylor@armltd.co.uk)
7 	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10 
11    This file is part of GAS, the GNU Assembler.
12 
13    GAS is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 3, or (at your option)
16    any later version.
17 
18    GAS is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
21    GNU General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with GAS; see the file COPYING.  If not, write to the Free
25    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26    02110-1301, USA.  */
27 
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define	 NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35 #include "libiberty.h"
36 #include "opcode/arm.h"
37 
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42 
43 #include "dwarf2dbg.h"
44 
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two).  */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48 
49 /* This structure holds the unwinding state.  */
50 
51 static struct
52 {
53   symbolS *	  proc_start;
54   symbolS *	  table_entry;
55   symbolS *	  personality_routine;
56   int		  personality_index;
57   /* The segment containing the function.  */
58   segT		  saved_seg;
59   subsegT	  saved_subseg;
60   /* Opcodes generated from this function.  */
61   unsigned char * opcodes;
62   int		  opcode_count;
63   int		  opcode_alloc;
64   /* The number of bytes pushed to the stack.  */
65   offsetT	  frame_size;
66   /* We don't add stack adjustment opcodes immediately so that we can merge
67      multiple adjustments.  We can also omit the final adjustment
68      when using a frame pointer.  */
69   offsetT	  pending_offset;
70   /* These two fields are set by both unwind_movsp and unwind_setfp.  They
71      hold the reg+offset to use when restoring sp from a frame pointer.	 */
72   offsetT	  fp_offset;
73   int		  fp_reg;
74   /* Nonzero if an unwind_setfp directive has been seen.  */
75   unsigned	  fp_used:1;
76   /* Nonzero if the last opcode restores sp from fp_reg.  */
77   unsigned	  sp_restored:1;
78 } unwind;
79 
80 #endif /* OBJ_ELF */
81 
82 /* Results from operand parsing worker functions.  */
83 
84 typedef enum
85 {
86   PARSE_OPERAND_SUCCESS,
87   PARSE_OPERAND_FAIL,
88   PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result;
90 
91 enum arm_float_abi
92 {
93   ARM_FLOAT_ABI_HARD,
94   ARM_FLOAT_ABI_SOFTFP,
95   ARM_FLOAT_ABI_SOFT
96 };
97 
98 /* Types of processor to assemble for.	*/
99 #ifndef CPU_DEFAULT
100 /* The code that was here used to select a default CPU depending on compiler
101    pre-defines which were only present when doing native builds, thus
102    changing gas' default behaviour depending upon the build host.
103 
104    If you have a target that requires a default CPU option then you
105    should define CPU_DEFAULT here.  */
106 #endif
107 
108 #ifndef FPU_DEFAULT
109 # ifdef TE_LINUX
110 #  define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
112 #  ifdef OBJ_ELF
113 #   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
114 #  else
115     /* Legacy a.out format.  */
116 #   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
117 #  endif
118 # elif defined (TE_VXWORKS)
119 #  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
120 # else
121    /* For backwards compatibility, default to FPA.  */
122 #  define FPU_DEFAULT FPU_ARCH_FPA
123 # endif
124 #endif /* ifndef FPU_DEFAULT */
125 
126 #define streq(a, b)	      (strcmp (a, b) == 0)
127 
128 static arm_feature_set cpu_variant;
129 static arm_feature_set arm_arch_used;
130 static arm_feature_set thumb_arch_used;
131 
132 /* Flags stored in private area of BFD structure.  */
133 static int uses_apcs_26	     = FALSE;
134 static int atpcs	     = FALSE;
135 static int support_interwork = FALSE;
136 static int uses_apcs_float   = FALSE;
137 static int pic_code	     = FALSE;
138 static int fix_v4bx	     = FALSE;
139 /* Warn on using deprecated features.  */
140 static int warn_on_deprecated = TRUE;
141 
142 
143 /* Variables that we set while parsing command-line options.  Once all
144    options have been read we re-process these values to set the real
145    assembly flags.  */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148 
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155 
156 /* Constants for known architecture features.  */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166 
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170 
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180   ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200   ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
206 
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
212 
213 static const arm_feature_set arm_cext_iwmmxt2 =
214   ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216   ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218   ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220   ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224   ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230   ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233   ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238   ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240   ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242   ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
243 
244 static int mfloat_abi_opt = -1;
245 /* Record user cpu selection for object attributes.  */
246 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
247 /* Must be long enough to hold any of the names in arm_cpus.  */
248 static char selected_cpu_name[16];
249 
250 /* Return TRUE if no cpu was selected on the command line.  */
251 static bfd_boolean
252 no_cpu_selected (void)
253 {
254   return selected_cpu.core == arm_arch_none.core
255     && selected_cpu.coproc == arm_arch_none.coproc;
256 }
257 
258 #ifdef OBJ_ELF
259 # ifdef EABI_DEFAULT
260 static int meabi_flags = EABI_DEFAULT;
261 # else
262 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
263 # endif
264 
265 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
266 
267 bfd_boolean
268 arm_is_eabi (void)
269 {
270   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
271 }
272 #endif
273 
274 #ifdef OBJ_ELF
275 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
276 symbolS * GOT_symbol;
277 #endif
278 
279 /* 0: assemble for ARM,
280    1: assemble for Thumb,
281    2: assemble for Thumb even though target CPU does not support thumb
282       instructions.  */
283 static int thumb_mode = 0;
284 /* A value distinct from the possible values for thumb_mode that we
285    can use to record whether thumb_mode has been copied into the
286    tc_frag_data field of a frag.  */
287 #define MODE_RECORDED (1 << 4)
288 
289 /* Specifies the intrinsic IT insn behavior mode.  */
290 enum implicit_it_mode
291 {
292   IMPLICIT_IT_MODE_NEVER  = 0x00,
293   IMPLICIT_IT_MODE_ARM    = 0x01,
294   IMPLICIT_IT_MODE_THUMB  = 0x02,
295   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
296 };
297 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
298 
299 /* If unified_syntax is true, we are processing the new unified
300    ARM/Thumb syntax.  Important differences from the old ARM mode:
301 
302      - Immediate operands do not require a # prefix.
303      - Conditional affixes always appear at the end of the
304        instruction.  (For backward compatibility, those instructions
305        that formerly had them in the middle, continue to accept them
306        there.)
307      - The IT instruction may appear, and if it does is validated
308        against subsequent conditional affixes.  It does not generate
309        machine code.
310 
311    Important differences from the old Thumb mode:
312 
313      - Immediate operands do not require a # prefix.
314      - Most of the V6T2 instructions are only available in unified mode.
315      - The .N and .W suffixes are recognized and honored (it is an error
316        if they cannot be honored).
317      - All instructions set the flags if and only if they have an 's' affix.
318      - Conditional affixes may be used.  They are validated against
319        preceding IT instructions.  Unlike ARM mode, you cannot use a
320        conditional affix except in the scope of an IT instruction.  */
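/* For example, the divided-syntax "addeqs r0, r0, #1" is written
   "addseq r0, r0, #1" in unified syntax.  */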
321 
322 static bfd_boolean unified_syntax = FALSE;
323 
324 /* An immediate operand can start with #, and ld*, st*, pld operands
325    can contain [ and ].  We need to tell APP not to elide whitespace
326    before a [, which can appear as the first operand for pld.  */
327 const char arm_symbol_chars[] = "#[]";
328 
329 enum neon_el_type
330 {
331   NT_invtype,
332   NT_untyped,
333   NT_integer,
334   NT_float,
335   NT_poly,
336   NT_signed,
337   NT_unsigned
338 };
339 
340 struct neon_type_el
341 {
342   enum neon_el_type type;
343   unsigned size;
344 };
345 
346 #define NEON_MAX_TYPE_ELS 4
347 
348 struct neon_type
349 {
350   struct neon_type_el el[NEON_MAX_TYPE_ELS];
351   unsigned elems;
352 };
353 
354 enum it_instruction_type
355 {
356    OUTSIDE_IT_INSN,
357    INSIDE_IT_INSN,
358    INSIDE_IT_LAST_INSN,
359    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
360                               if inside, should be the last one.  */
361    NEUTRAL_IT_INSN,        /* This could be either inside or outside,
362                               i.e. BKPT and NOP.  */
363    IT_INSN                 /* The IT insn has been parsed.  */
364 };
365 
366 /* The maximum number of operands we need.  */
367 #define ARM_IT_MAX_OPERANDS 6
368 
369 struct arm_it
370 {
371   const char *	error;
372   unsigned long instruction;
373   int		size;
374   int		size_req;
375   int		cond;
376   /* "uncond_value" is set to the value in place of the conditional field in
377      unconditional versions of the instruction, or -1 if nothing is
378      appropriate.  */
379   int		uncond_value;
380   struct neon_type vectype;
381   /* This does not indicate an actual NEON instruction, only that
382      the mnemonic accepts neon-style type suffixes.  */
383   int		is_neon;
384   /* Set to the opcode if the instruction needs relaxation.
385      Zero if the instruction is not relaxed.  */
386   unsigned long	relax;
387   struct
388   {
389     bfd_reloc_code_real_type type;
390     expressionS		     exp;
391     int			     pc_rel;
392   } reloc;
393 
394   enum it_instruction_type it_insn_type;
395 
396   struct
397   {
398     unsigned reg;
399     signed int imm;
400     struct neon_type_el vectype;
401     unsigned present	: 1;  /* Operand present.  */
402     unsigned isreg	: 1;  /* Operand was a register.  */
403     unsigned immisreg	: 1;  /* .imm field is a second register.  */
404     unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
405     unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
406     unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
407     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
408        instructions. This allows us to disambiguate ARM <-> vector insns.  */
409     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
410     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
411     unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
412     unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
413     unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
414     unsigned writeback	: 1;  /* Operand has trailing !  */
415     unsigned preind	: 1;  /* Preindexed address.  */
416     unsigned postind	: 1;  /* Postindexed address.  */
417     unsigned negative	: 1;  /* Index register was negated.  */
418     unsigned shifted	: 1;  /* Shift applied to operation.  */
419     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
420   } operands[ARM_IT_MAX_OPERANDS];
421 };
422 
423 static struct arm_it inst;
424 
425 #define NUM_FLOAT_VALS 8
426 
427 const char * fp_const[] =
428 {
429   "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
430 };
431 
432 /* Number of littlenums required to hold an extended precision number.	*/
433 #define MAX_LITTLENUMS 6
434 
435 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
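/* fp_values caches the littlenum representation of each entry in fp_const;
   these eight values are the only floating-point constants that FPA
   instructions can encode directly as immediate operands.  */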
436 
437 #define FAIL	(-1)
438 #define SUCCESS (0)
439 
440 #define SUFF_S 1
441 #define SUFF_D 2
442 #define SUFF_E 3
443 #define SUFF_P 4
444 
445 #define CP_T_X	 0x00008000
446 #define CP_T_Y	 0x00400000
447 
448 #define CONDS_BIT	 0x00100000
449 #define LOAD_BIT	 0x00100000
450 
451 #define DOUBLE_LOAD_FLAG 0x00000001
452 
453 struct asm_cond
454 {
455   const char *	 template_name;
456   unsigned long  value;
457 };
458 
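/* 0xE is the AL (always) condition code.  */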
459 #define COND_ALWAYS 0xE
460 
461 struct asm_psr
462 {
463   const char *   template_name;
464   unsigned long  field;
465 };
466 
467 struct asm_barrier_opt
468 {
469   const char *    template_name;
470   unsigned long   value;
471   const arm_feature_set arch;
472 };
473 
474 /* The bit that distinguishes CPSR and SPSR.  */
475 #define SPSR_BIT   (1 << 22)
476 
477 /* The individual PSR flag bits.  */
478 #define PSR_c	(1 << 16)
479 #define PSR_x	(1 << 17)
480 #define PSR_s	(1 << 18)
481 #define PSR_f	(1 << 19)
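/* These match the field-mask bits (16-19) of the MSR instruction, so e.g.
   "msr CPSR_fc, r0" sets both PSR_f and PSR_c.  */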
482 
483 struct reloc_entry
484 {
485   char *                    name;
486   bfd_reloc_code_real_type  reloc;
487 };
488 
489 enum vfp_reg_pos
490 {
491   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
492   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
493 };
494 
495 enum vfp_ldstm_type
496 {
497   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
498 };
499 
500 /* Bits for DEFINED field in neon_typed_alias.  */
501 #define NTA_HASTYPE  1
502 #define NTA_HASINDEX 2
503 
504 struct neon_typed_alias
505 {
506   unsigned char        defined;
507   unsigned char        index;
508   struct neon_type_el  eltype;
509 };
510 
511 /* ARM register categories.  This includes coprocessor numbers and various
512    architecture extensions' registers.	*/
513 enum arm_reg_type
514 {
515   REG_TYPE_RN,
516   REG_TYPE_CP,
517   REG_TYPE_CN,
518   REG_TYPE_FN,
519   REG_TYPE_VFS,
520   REG_TYPE_VFD,
521   REG_TYPE_NQ,
522   REG_TYPE_VFSD,
523   REG_TYPE_NDQ,
524   REG_TYPE_NSDQ,
525   REG_TYPE_VFC,
526   REG_TYPE_MVF,
527   REG_TYPE_MVD,
528   REG_TYPE_MVFX,
529   REG_TYPE_MVDX,
530   REG_TYPE_MVAX,
531   REG_TYPE_DSPSC,
532   REG_TYPE_MMXWR,
533   REG_TYPE_MMXWC,
534   REG_TYPE_MMXWCG,
535   REG_TYPE_XSCALE,
536   REG_TYPE_RNB
537 };
538 
539 /* Structure for a hash table entry for a register.
540    If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
541    information which states whether a vector type or index is specified (for a
542    register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
543 struct reg_entry
544 {
545   const char *               name;
546   unsigned int               number;
547   unsigned char              type;
548   unsigned char              builtin;
549   struct neon_typed_alias *  neon;
550 };
551 
552 /* Diagnostics used when we don't get a register of the expected type.	*/
553 const char * const reg_expected_msgs[] =
554 {
555   N_("ARM register expected"),
556   N_("bad or missing co-processor number"),
557   N_("co-processor register expected"),
558   N_("FPA register expected"),
559   N_("VFP single precision register expected"),
560   N_("VFP/Neon double precision register expected"),
561   N_("Neon quad precision register expected"),
562   N_("VFP single or double precision register expected"),
563   N_("Neon double or quad precision register expected"),
564   N_("VFP single, double or Neon quad precision register expected"),
565   N_("VFP system register expected"),
566   N_("Maverick MVF register expected"),
567   N_("Maverick MVD register expected"),
568   N_("Maverick MVFX register expected"),
569   N_("Maverick MVDX register expected"),
570   N_("Maverick MVAX register expected"),
571   N_("Maverick DSPSC register expected"),
572   N_("iWMMXt data register expected"),
573   N_("iWMMXt control register expected"),
574   N_("iWMMXt scalar register expected"),
575   N_("XScale accumulator register expected"),
576 };
577 
578 /* Some well known registers that we refer to directly elsewhere.  */
579 #define REG_R12	12
580 #define REG_SP	13
581 #define REG_LR	14
582 #define REG_PC	15
583 
584 /* ARM instructions take 4 bytes in the object file, Thumb instructions
585    take 2:  */
586 #define INSN_SIZE	4
587 
588 struct asm_opcode
589 {
590   /* Basic string to match.  */
591   const char * template_name;
592 
593   /* Parameters to instruction.	 */
594   unsigned int operands[8];
595 
596   /* Conditional tag - see opcode_lookup.  */
597   unsigned int tag : 4;
598 
599   /* Basic instruction code.  */
600   unsigned int avalue : 28;
601 
602   /* Thumb-format instruction code.  */
603   unsigned int tvalue;
604 
605   /* Which architecture variant provides this instruction.  */
606   const arm_feature_set * avariant;
607   const arm_feature_set * tvariant;
608 
609   /* Function to call to encode instruction in ARM format.  */
610   void (* aencode) (void);
611 
612   /* Function to call to encode instruction in Thumb format.  */
613   void (* tencode) (void);
614 };
615 
616 /* Defines for various bits that we will want to toggle.  */
617 #define INST_IMMEDIATE	0x02000000
618 #define OFFSET_REG	0x02000000
619 #define HWOFFSET_IMM	0x00400000
620 #define SHIFT_BY_REG	0x00000010
621 #define PRE_INDEX	0x01000000
622 #define INDEX_UP	0x00800000
623 #define WRITE_BACK	0x00200000
624 #define LDM_TYPE_2_OR_3	0x00400000
625 #define CPSI_MMOD	0x00020000
626 
627 #define LITERAL_MASK	0xf000f000
628 #define OPCODE_MASK	0xfe1fffff
629 #define V4_STR_BIT	0x00000020
630 
631 #define T2_SUBS_PC_LR	0xf3de8f00
632 
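/* Position of the 4-bit opcode field within ARM data-processing
   instructions (bits 21-24); T2_DATA_OP_SHIFT below is the Thumb-2
   equivalent.  */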
633 #define DATA_OP_SHIFT	21
634 
635 #define T2_OPCODE_MASK	0xfe1fffff
636 #define T2_DATA_OP_SHIFT 21
637 
638 #define A_COND_MASK         0xf0000000
639 #define A_PUSH_POP_OP_MASK  0x0fff0000
640 
641 /* Opcodes for pushing/popping registers to/from the stack.  */
642 #define A1_OPCODE_PUSH    0x092d0000
643 #define A2_OPCODE_PUSH    0x052d0004
644 #define A2_OPCODE_POP     0x049d0004
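/* A1_OPCODE_PUSH is "stmdb sp!, {...}"; the A2 forms are the single-register
   "str Rt, [sp, #-4]!" and "ldr Rt, [sp], #4" encodings.  */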
645 
646 /* Codes to distinguish the arithmetic instructions.  */
647 #define OPCODE_AND	0
648 #define OPCODE_EOR	1
649 #define OPCODE_SUB	2
650 #define OPCODE_RSB	3
651 #define OPCODE_ADD	4
652 #define OPCODE_ADC	5
653 #define OPCODE_SBC	6
654 #define OPCODE_RSC	7
655 #define OPCODE_TST	8
656 #define OPCODE_TEQ	9
657 #define OPCODE_CMP	10
658 #define OPCODE_CMN	11
659 #define OPCODE_ORR	12
660 #define OPCODE_MOV	13
661 #define OPCODE_BIC	14
662 #define OPCODE_MVN	15
663 
664 #define T2_OPCODE_AND	0
665 #define T2_OPCODE_BIC	1
666 #define T2_OPCODE_ORR	2
667 #define T2_OPCODE_ORN	3
668 #define T2_OPCODE_EOR	4
669 #define T2_OPCODE_ADD	8
670 #define T2_OPCODE_ADC	10
671 #define T2_OPCODE_SBC	11
672 #define T2_OPCODE_SUB	13
673 #define T2_OPCODE_RSB	14
674 
675 #define T_OPCODE_MUL 0x4340
676 #define T_OPCODE_TST 0x4200
677 #define T_OPCODE_CMN 0x42c0
678 #define T_OPCODE_NEG 0x4240
679 #define T_OPCODE_MVN 0x43c0
680 
681 #define T_OPCODE_ADD_R3	0x1800
682 #define T_OPCODE_SUB_R3 0x1a00
683 #define T_OPCODE_ADD_HI 0x4400
684 #define T_OPCODE_ADD_ST 0xb000
685 #define T_OPCODE_SUB_ST 0xb080
686 #define T_OPCODE_ADD_SP 0xa800
687 #define T_OPCODE_ADD_PC 0xa000
688 #define T_OPCODE_ADD_I8 0x3000
689 #define T_OPCODE_SUB_I8 0x3800
690 #define T_OPCODE_ADD_I3 0x1c00
691 #define T_OPCODE_SUB_I3 0x1e00
692 
693 #define T_OPCODE_ASR_R	0x4100
694 #define T_OPCODE_LSL_R	0x4080
695 #define T_OPCODE_LSR_R	0x40c0
696 #define T_OPCODE_ROR_R	0x41c0
697 #define T_OPCODE_ASR_I	0x1000
698 #define T_OPCODE_LSL_I	0x0000
699 #define T_OPCODE_LSR_I	0x0800
700 
701 #define T_OPCODE_MOV_I8	0x2000
702 #define T_OPCODE_CMP_I8 0x2800
703 #define T_OPCODE_CMP_LR 0x4280
704 #define T_OPCODE_MOV_HR 0x4600
705 #define T_OPCODE_CMP_HR 0x4500
706 
707 #define T_OPCODE_LDR_PC 0x4800
708 #define T_OPCODE_LDR_SP 0x9800
709 #define T_OPCODE_STR_SP 0x9000
710 #define T_OPCODE_LDR_IW 0x6800
711 #define T_OPCODE_STR_IW 0x6000
712 #define T_OPCODE_LDR_IH 0x8800
713 #define T_OPCODE_STR_IH 0x8000
714 #define T_OPCODE_LDR_IB 0x7800
715 #define T_OPCODE_STR_IB 0x7000
716 #define T_OPCODE_LDR_RW 0x5800
717 #define T_OPCODE_STR_RW 0x5000
718 #define T_OPCODE_LDR_RH 0x5a00
719 #define T_OPCODE_STR_RH 0x5200
720 #define T_OPCODE_LDR_RB 0x5c00
721 #define T_OPCODE_STR_RB 0x5400
722 
723 #define T_OPCODE_PUSH	0xb400
724 #define T_OPCODE_POP	0xbc00
725 
726 #define T_OPCODE_BRANCH 0xe000
727 
728 #define THUMB_SIZE	2	/* Size of thumb instruction.  */
729 #define THUMB_PP_PC_LR 0x0100
730 #define THUMB_LOAD_BIT 0x0800
731 #define THUMB2_LOAD_BIT 0x00100000
732 
733 #define BAD_ARGS	_("bad arguments to instruction")
734 #define BAD_SP          _("r13 not allowed here")
735 #define BAD_PC		_("r15 not allowed here")
736 #define BAD_COND	_("instruction cannot be conditional")
737 #define BAD_OVERLAP	_("registers may not be the same")
738 #define BAD_HIREG	_("lo register required")
739 #define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
740 #define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
741 #define BAD_BRANCH	_("branch must be last instruction in IT block")
742 #define BAD_NOT_IT	_("instruction not allowed in IT block")
743 #define BAD_FPU		_("selected FPU does not support instruction")
744 #define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
745 #define BAD_IT_COND	_("incorrect condition in IT block")
746 #define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
747 #define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
748 #define BAD_PC_ADDRESSING \
749 	_("cannot use register index with PC-relative addressing")
750 #define BAD_PC_WRITEBACK \
751 	_("cannot use writeback with PC-relative addressing")
752 #define BAD_RANGE     _("branch out of range")
753 
754 static struct hash_control * arm_ops_hsh;
755 static struct hash_control * arm_cond_hsh;
756 static struct hash_control * arm_shift_hsh;
757 static struct hash_control * arm_psr_hsh;
758 static struct hash_control * arm_v7m_psr_hsh;
759 static struct hash_control * arm_reg_hsh;
760 static struct hash_control * arm_reloc_hsh;
761 static struct hash_control * arm_barrier_opt_hsh;
762 
763 /* Stuff needed to resolve the label ambiguity
764    As:
765      ...
766      label:   <insn>
767    may differ from:
768      ...
769      label:
770 	      <insn>  */
771 
772 symbolS *  last_label_seen;
773 static int label_is_thumb_function_name = FALSE;
774 
775 /* Literal pool structure.  Held on a per-section
776    and per-sub-section basis.  */
777 
778 #define MAX_LITERAL_POOL_SIZE 1024
779 typedef struct literal_pool
780 {
781   expressionS	         literals [MAX_LITERAL_POOL_SIZE];
782   unsigned int	         next_free_entry;
783   unsigned int	         id;
784   symbolS *	         symbol;
785   segT		         section;
786   subsegT	         sub_section;
787 #ifdef OBJ_ELF
788   struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
789 #endif
790   struct literal_pool *  next;
791 } literal_pool;
792 
793 /* Pointer to a linked list of literal pools.  */
794 literal_pool * list_of_pools = NULL;
795 
796 #ifdef OBJ_ELF
797 #  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
798 #else
799 static struct current_it now_it;
800 #endif
801 
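/* Two condition codes are compatible within an IT block if they differ only
   in bit 0, i.e. a condition and its logical inverse (EQ/NE, CS/CC, ...)
   share the same base condition.  */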
802 static inline int
803 now_it_compatible (int cond)
804 {
805   return (cond & ~1) == (now_it.cc & ~1);
806 }
807 
808 static inline int
809 conditional_insn (void)
810 {
811   return inst.cond != COND_ALWAYS;
812 }
813 
814 static int in_it_block (void);
815 
816 static int handle_it_state (void);
817 
818 static void force_automatic_it_block_close (void);
819 
820 static void it_fsm_post_encode (void);
821 
822 #define set_it_insn_type(type)			\
823   do						\
824     {						\
825       inst.it_insn_type = type;			\
826       if (handle_it_state () == FAIL)		\
827         return;					\
828     }						\
829   while (0)
830 
831 #define set_it_insn_type_nonvoid(type, failret) \
832   do						\
833     {                                           \
834       inst.it_insn_type = type;			\
835       if (handle_it_state () == FAIL)		\
836         return failret;				\
837     }						\
838   while(0)
839 
840 #define set_it_insn_type_last()				\
841   do							\
842     {							\
843       if (inst.cond == COND_ALWAYS)			\
844         set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
845       else						\
846         set_it_insn_type (INSIDE_IT_LAST_INSN);		\
847     }							\
848   while (0)
849 
850 /* Pure syntax.	 */
851 
852 /* This array holds the chars that always start a comment.  If the
853    pre-processor is disabled, these aren't very useful.	 */
854 const char comment_chars[] = "@";
855 
856 /* This array holds the chars that only start a comment at the beginning of
857    a line.  If the line seems to have the form '# 123 filename'
858    .line and .file directives will appear in the pre-processed output.	*/
859 /* Note that input_file.c hand checks for '#' at the beginning of the
860    first line of the input file.  This is because the compiler outputs
861    #NO_APP at the beginning of its output.  */
862 /* Also note that comments like this one will always work.  */
863 const char line_comment_chars[] = "#";
864 
865 const char line_separator_chars[] = ";";
866 
867 /* Chars that can be used to separate mant
868    from exp in floating point numbers.	*/
869 const char EXP_CHARS[] = "eE";
870 
871 /* Chars that mean this number is a floating point constant.  */
872 /* As in 0f12.456  */
873 /* or	 0d1.2345e12  */
874 
875 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
876 
877 /* Prefix characters that indicate the start of an immediate
878    value.  */
879 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
880 
881 /* Separator character handling.  */
882 
883 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
884 
885 static inline int
886 skip_past_char (char ** str, char c)
887 {
888   if (**str == c)
889     {
890       (*str)++;
891       return SUCCESS;
892     }
893   else
894     return FAIL;
895 }
896 
897 #define skip_past_comma(str) skip_past_char (str, ',')
898 
899 /* Arithmetic expressions (possibly involving symbols).	 */
900 
901 /* Return TRUE if anything in the expression is a bignum.  */
902 
903 static int
904 walk_no_bignums (symbolS * sp)
905 {
906   if (symbol_get_value_expression (sp)->X_op == O_big)
907     return 1;
908 
909   if (symbol_get_value_expression (sp)->X_add_symbol)
910     {
911       return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
912 	      || (symbol_get_value_expression (sp)->X_op_symbol
913 		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
914     }
915 
916   return 0;
917 }
918 
919 static int in_my_get_expression = 0;
920 
921 /* Third argument to my_get_expression.	 */
922 #define GE_NO_PREFIX 0
923 #define GE_IMM_PREFIX 1
924 #define GE_OPT_PREFIX 2
925 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
926    immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
927 #define GE_OPT_PREFIX_BIG 3
928 
929 static int
930 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
931 {
932   char * save_in;
933   segT	 seg;
934 
935   /* In unified syntax, all prefixes are optional.  */
936   if (unified_syntax)
937     prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
938                   : GE_OPT_PREFIX;
939 
940   switch (prefix_mode)
941     {
942     case GE_NO_PREFIX: break;
943     case GE_IMM_PREFIX:
944       if (!is_immediate_prefix (**str))
945 	{
946 	  inst.error = _("immediate expression requires a # prefix");
947 	  return FAIL;
948 	}
949       (*str)++;
950       break;
951     case GE_OPT_PREFIX:
952     case GE_OPT_PREFIX_BIG:
953       if (is_immediate_prefix (**str))
954 	(*str)++;
955       break;
956     default: abort ();
957     }
958 
959   memset (ep, 0, sizeof (expressionS));
960 
961   save_in = input_line_pointer;
962   input_line_pointer = *str;
963   in_my_get_expression = 1;
964   seg = expression (ep);
965   in_my_get_expression = 0;
966 
967   if (ep->X_op == O_illegal || ep->X_op == O_absent)
968     {
969       /* We found a bad or missing expression in md_operand().  */
970       *str = input_line_pointer;
971       input_line_pointer = save_in;
972       if (inst.error == NULL)
973 	inst.error = (ep->X_op == O_absent
974 		      ? _("missing expression") :_("bad expression"));
975       return 1;
976     }
977 
978 #ifdef OBJ_AOUT
979   if (seg != absolute_section
980       && seg != text_section
981       && seg != data_section
982       && seg != bss_section
983       && seg != undefined_section)
984     {
985       inst.error = _("bad segment");
986       *str = input_line_pointer;
987       input_line_pointer = save_in;
988       return 1;
989     }
990 #else
991   (void) seg;
992 #endif
993 
994   /* Get rid of any bignums now, so that we don't generate an error for which
995      we can't establish a line number later on.	 Big numbers are never valid
996      in instructions, which is where this routine is always called.  */
997   if (prefix_mode != GE_OPT_PREFIX_BIG
998       && (ep->X_op == O_big
999           || (ep->X_add_symbol
1000 	      && (walk_no_bignums (ep->X_add_symbol)
1001 	          || (ep->X_op_symbol
1002 		      && walk_no_bignums (ep->X_op_symbol))))))
1003     {
1004       inst.error = _("invalid constant");
1005       *str = input_line_pointer;
1006       input_line_pointer = save_in;
1007       return 1;
1008     }
1009 
1010   *str = input_line_pointer;
1011   input_line_pointer = save_in;
1012   return 0;
1013 }
1014 
1015 /* Turn a string in input_line_pointer into a floating point constant
1016    of type TYPE, and store the appropriate bytes in *LITP.  The number
1017    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1018    returned, or NULL on OK.
1019 
1020    Note that fp constants aren't represented in the normal way on the ARM.
1021    In big endian mode, things are as expected.	However, in little endian
1022    mode fp constants are big-endian word-wise, and little-endian byte-wise
1023    within the words.  For example, (double) 1.1 in big endian mode is
1024    the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1025    the byte sequence 99 99 f1 3f 9a 99 99 99.
1026 
1027    ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1028 
1029 char *
1030 md_atof (int type, char * litP, int * sizeP)
1031 {
1032   int prec;
1033   LITTLENUM_TYPE words[MAX_LITTLENUMS];
1034   char *t;
1035   int i;
1036 
1037   switch (type)
1038     {
1039     case 'f':
1040     case 'F':
1041     case 's':
1042     case 'S':
1043       prec = 2;
1044       break;
1045 
1046     case 'd':
1047     case 'D':
1048     case 'r':
1049     case 'R':
1050       prec = 4;
1051       break;
1052 
1053     case 'x':
1054     case 'X':
1055       prec = 5;
1056       break;
1057 
1058     case 'p':
1059     case 'P':
1060       prec = 5;
1061       break;
1062 
1063     default:
1064       *sizeP = 0;
1065       return _("Unrecognized or unsupported floating point constant");
1066     }
1067 
1068   t = atof_ieee (input_line_pointer, type, words);
1069   if (t)
1070     input_line_pointer = t;
1071   *sizeP = prec * sizeof (LITTLENUM_TYPE);
1072 
1073   if (target_big_endian)
1074     {
1075       for (i = 0; i < prec; i++)
1076 	{
1077 	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1078 	  litP += sizeof (LITTLENUM_TYPE);
1079 	}
1080     }
1081   else
1082     {
1083       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1084 	for (i = prec - 1; i >= 0; i--)
1085 	  {
1086 	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1087 	    litP += sizeof (LITTLENUM_TYPE);
1088 	  }
1089       else
1090 	/* For a 4 byte float the order of elements in `words' is 1 0.
1091 	   For an 8 byte float the order is 1 0 3 2.  */
1092 	for (i = 0; i < prec; i += 2)
1093 	  {
1094 	    md_number_to_chars (litP, (valueT) words[i + 1],
1095 				sizeof (LITTLENUM_TYPE));
1096 	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1097 				(valueT) words[i], sizeof (LITTLENUM_TYPE));
1098 	    litP += 2 * sizeof (LITTLENUM_TYPE);
1099 	  }
1100     }
1101 
1102   return NULL;
1103 }
1104 
1105 /* We handle all bad expressions here, so that we can report the faulty
1106    instruction in the error message.  */
1107 void
1108 md_operand (expressionS * exp)
1109 {
1110   if (in_my_get_expression)
1111     exp->X_op = O_illegal;
1112 }
1113 
1114 /* Immediate values.  */
1115 
1116 /* Generic immediate-value read function for use in directives.
1117    Accepts anything that 'expression' can fold to a constant.
1118    *val receives the number.  */
1119 #ifdef OBJ_ELF
1120 static int
1121 immediate_for_directive (int *val)
1122 {
1123   expressionS exp;
1124   exp.X_op = O_illegal;
1125 
1126   if (is_immediate_prefix (*input_line_pointer))
1127     {
1128       input_line_pointer++;
1129       expression (&exp);
1130     }
1131 
1132   if (exp.X_op != O_constant)
1133     {
1134       as_bad (_("expected #constant"));
1135       ignore_rest_of_line ();
1136       return FAIL;
1137     }
1138   *val = exp.X_add_number;
1139   return SUCCESS;
1140 }
1141 #endif
1142 
1143 /* Register parsing.  */
1144 
1145 /* Generic register parser.  CCP points to what should be the
1146    beginning of a register name.  If it is indeed a valid register
1147    name, advance CCP over it and return the reg_entry structure;
1148    otherwise return NULL.  Does not issue diagnostics.	*/
1149 
1150 static struct reg_entry *
1151 arm_reg_parse_multi (char **ccp)
1152 {
1153   char *start = *ccp;
1154   char *p;
1155   struct reg_entry *reg;
1156 
1157 #ifdef REGISTER_PREFIX
1158   if (*start != REGISTER_PREFIX)
1159     return NULL;
1160   start++;
1161 #endif
1162 #ifdef OPTIONAL_REGISTER_PREFIX
1163   if (*start == OPTIONAL_REGISTER_PREFIX)
1164     start++;
1165 #endif
1166 
1167   p = start;
1168   if (!ISALPHA (*p) || !is_name_beginner (*p))
1169     return NULL;
1170 
1171   do
1172     p++;
1173   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1174 
1175   reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1176 
1177   if (!reg)
1178     return NULL;
1179 
1180   *ccp = p;
1181   return reg;
1182 }
1183 
1184 static int
1185 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1186                     enum arm_reg_type type)
1187 {
1188   /* Alternative syntaxes are accepted for a few register classes.  */
1189   switch (type)
1190     {
1191     case REG_TYPE_MVF:
1192     case REG_TYPE_MVD:
1193     case REG_TYPE_MVFX:
1194     case REG_TYPE_MVDX:
1195       /* Generic coprocessor register names are allowed for these.  */
1196       if (reg && reg->type == REG_TYPE_CN)
1197 	return reg->number;
1198       break;
1199 
1200     case REG_TYPE_CP:
1201       /* For backward compatibility, a bare number is valid here.  */
1202       {
1203 	unsigned long processor = strtoul (start, ccp, 10);
1204 	if (*ccp != start && processor <= 15)
1205 	  return processor;
1206       }
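      /* Fall through.  */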
1207 
1208     case REG_TYPE_MMXWC:
1209       /* WC includes WCG.  ??? I'm not sure this is true for all
1210 	 instructions that take WC registers.  */
1211       if (reg && reg->type == REG_TYPE_MMXWCG)
1212 	return reg->number;
1213       break;
1214 
1215     default:
1216       break;
1217     }
1218 
1219   return FAIL;
1220 }
1221 
1222 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1223    return value is the register number or FAIL.  */
1224 
1225 static int
1226 arm_reg_parse (char **ccp, enum arm_reg_type type)
1227 {
1228   char *start = *ccp;
1229   struct reg_entry *reg = arm_reg_parse_multi (ccp);
1230   int ret;
1231 
1232   /* Do not allow a scalar (reg+index) to parse as a register.  */
1233   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1234     return FAIL;
1235 
1236   if (reg && reg->type == type)
1237     return reg->number;
1238 
1239   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1240     return ret;
1241 
1242   *ccp = start;
1243   return FAIL;
1244 }
1245 
1246 /* Parse a Neon type specifier. *STR should point at the leading '.'
1247    character. Does no verification at this stage that the type fits the opcode
1248    properly. E.g.,
1249 
1250      .i32.i32.s16
1251      .s32.f32
1252      .u16
1253 
1254    Can all be legally parsed by this function.
1255 
1256    Fills in neon_type struct pointer with parsed information, and updates STR
1257    to point after the parsed type specifier. Returns SUCCESS if this was a legal
1258    type, FAIL if not.  */
1259 
1260 static int
1261 parse_neon_type (struct neon_type *type, char **str)
1262 {
1263   char *ptr = *str;
1264 
1265   if (type)
1266     type->elems = 0;
1267 
1268   while (type->elems < NEON_MAX_TYPE_ELS)
1269     {
1270       enum neon_el_type thistype = NT_untyped;
1271       unsigned thissize = -1u;
1272 
1273       if (*ptr != '.')
1274 	break;
1275 
1276       ptr++;
1277 
1278       /* Just a size without an explicit type.  */
1279       if (ISDIGIT (*ptr))
1280 	goto parsesize;
1281 
1282       switch (TOLOWER (*ptr))
1283 	{
1284 	case 'i': thistype = NT_integer; break;
1285 	case 'f': thistype = NT_float; break;
1286 	case 'p': thistype = NT_poly; break;
1287 	case 's': thistype = NT_signed; break;
1288 	case 'u': thistype = NT_unsigned; break;
1289         case 'd':
1290           thistype = NT_float;
1291           thissize = 64;
1292           ptr++;
1293           goto done;
1294 	default:
1295 	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1296 	  return FAIL;
1297 	}
1298 
1299       ptr++;
1300 
1301       /* .f is an abbreviation for .f32.  */
1302       if (thistype == NT_float && !ISDIGIT (*ptr))
1303 	thissize = 32;
1304       else
1305 	{
1306 	parsesize:
1307 	  thissize = strtoul (ptr, &ptr, 10);
1308 
1309 	  if (thissize != 8 && thissize != 16 && thissize != 32
1310               && thissize != 64)
1311             {
1312               as_bad (_("bad size %d in type specifier"), thissize);
1313 	      return FAIL;
1314 	    }
1315 	}
1316 
1317       done:
1318       if (type)
1319         {
1320           type->el[type->elems].type = thistype;
1321 	  type->el[type->elems].size = thissize;
1322 	  type->elems++;
1323 	}
1324     }
1325 
1326   /* Empty/missing type is not a successful parse.  */
1327   if (type->elems == 0)
1328     return FAIL;
1329 
1330   *str = ptr;
1331 
1332   return SUCCESS;
1333 }
1334 
1335 /* Errors may be set multiple times during parsing or bit encoding
1336    (particularly in the Neon bits), but usually the earliest error which is set
1337    will be the most meaningful. Avoid overwriting it with later (cascading)
1338    errors by calling this function.  */
1339 
1340 static void
1341 first_error (const char *err)
1342 {
1343   if (!inst.error)
1344     inst.error = err;
1345 }
1346 
1347 /* Parse a single type, e.g. ".s32", leading period included.  */
1348 static int
1349 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1350 {
1351   char *str = *ccp;
1352   struct neon_type optype;
1353 
1354   if (*str == '.')
1355     {
1356       if (parse_neon_type (&optype, &str) == SUCCESS)
1357         {
1358           if (optype.elems == 1)
1359             *vectype = optype.el[0];
1360           else
1361             {
1362               first_error (_("only one type should be specified for operand"));
1363               return FAIL;
1364             }
1365         }
1366       else
1367         {
1368           first_error (_("vector type expected"));
1369           return FAIL;
1370         }
1371     }
1372   else
1373     return FAIL;
1374 
1375   *ccp = str;
1376 
1377   return SUCCESS;
1378 }
1379 
1380 /* Special values for scalar indices (which normally range from 0 to 7);
1381    both values below still fit in a 4-bit field.  */
1382 
1383 #define NEON_ALL_LANES		15
1384 #define NEON_INTERLEAVE_LANES	14
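/* NEON_ALL_LANES corresponds to the "dN[]" (all lanes) scalar syntax; see
   parse_typed_reg_or_scalar below.  */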
1385 
1386 /* Parse either a register or a scalar, with an optional type. Return the
1387    register number, and optionally fill in the actual type of the register
1388    when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1389    type/index information in *TYPEINFO.  */
1390 
1391 static int
1392 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1393                            enum arm_reg_type *rtype,
1394                            struct neon_typed_alias *typeinfo)
1395 {
1396   char *str = *ccp;
1397   struct reg_entry *reg = arm_reg_parse_multi (&str);
1398   struct neon_typed_alias atype;
1399   struct neon_type_el parsetype;
1400 
1401   atype.defined = 0;
1402   atype.index = -1;
1403   atype.eltype.type = NT_invtype;
1404   atype.eltype.size = -1;
1405 
1406   /* Try alternate syntax for some types of register. Note these are mutually
1407      exclusive with the Neon syntax extensions.  */
1408   if (reg == NULL)
1409     {
1410       int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1411       if (altreg != FAIL)
1412         *ccp = str;
1413       if (typeinfo)
1414         *typeinfo = atype;
1415       return altreg;
1416     }
1417 
1418   /* Undo polymorphism when a set of register types may be accepted.  */
1419   if ((type == REG_TYPE_NDQ
1420        && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1421       || (type == REG_TYPE_VFSD
1422           && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1423       || (type == REG_TYPE_NSDQ
1424           && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1425               || reg->type == REG_TYPE_NQ))
1426       || (type == REG_TYPE_MMXWC
1427 	  && (reg->type == REG_TYPE_MMXWCG)))
1428     type = (enum arm_reg_type) reg->type;
1429 
1430   if (type != reg->type)
1431     return FAIL;
1432 
1433   if (reg->neon)
1434     atype = *reg->neon;
1435 
1436   if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1437     {
1438       if ((atype.defined & NTA_HASTYPE) != 0)
1439         {
1440           first_error (_("can't redefine type for operand"));
1441           return FAIL;
1442         }
1443       atype.defined |= NTA_HASTYPE;
1444       atype.eltype = parsetype;
1445     }
1446 
1447   if (skip_past_char (&str, '[') == SUCCESS)
1448     {
1449       if (type != REG_TYPE_VFD)
1450         {
1451           first_error (_("only D registers may be indexed"));
1452           return FAIL;
1453         }
1454 
1455       if ((atype.defined & NTA_HASINDEX) != 0)
1456         {
1457           first_error (_("can't change index for operand"));
1458           return FAIL;
1459         }
1460 
1461       atype.defined |= NTA_HASINDEX;
1462 
1463       if (skip_past_char (&str, ']') == SUCCESS)
1464         atype.index = NEON_ALL_LANES;
1465       else
1466         {
1467           expressionS exp;
1468 
1469           my_get_expression (&exp, &str, GE_NO_PREFIX);
1470 
1471           if (exp.X_op != O_constant)
1472             {
1473               first_error (_("constant expression required"));
1474               return FAIL;
1475             }
1476 
1477           if (skip_past_char (&str, ']') == FAIL)
1478             return FAIL;
1479 
1480           atype.index = exp.X_add_number;
1481         }
1482     }
1483 
1484   if (typeinfo)
1485     *typeinfo = atype;
1486 
1487   if (rtype)
1488     *rtype = type;
1489 
1490   *ccp = str;
1491 
1492   return reg->number;
1493 }
1494 
1495 /* Like arm_reg_parse, but also allow the following extra features:
1496     - If RTYPE is non-zero, return the (possibly restricted) type of the
1497       register (e.g. Neon double or quad reg when either has been requested).
1498     - If this is a Neon vector type with additional type information, fill
1499       in the struct pointed to by VECTYPE (if non-NULL).
1500    This function will fault on encountering a scalar.  */
1501 
1502 static int
1503 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1504                      enum arm_reg_type *rtype, struct neon_type_el *vectype)
1505 {
1506   struct neon_typed_alias atype;
1507   char *str = *ccp;
1508   int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1509 
1510   if (reg == FAIL)
1511     return FAIL;
1512 
1513   /* Do not allow regname(... to parse as a register.  */
1514   if (*str == '(')
1515     return FAIL;
1516 
1517   /* Do not allow a scalar (reg+index) to parse as a register.  */
1518   if ((atype.defined & NTA_HASINDEX) != 0)
1519     {
1520       first_error (_("register operand expected, but got scalar"));
1521       return FAIL;
1522     }
1523 
1524   if (vectype)
1525     *vectype = atype.eltype;
1526 
1527   *ccp = str;
1528 
1529   return reg;
1530 }
1531 
1532 #define NEON_SCALAR_REG(X)	((X) >> 4)
1533 #define NEON_SCALAR_INDEX(X)	((X) & 15)
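/* A scalar is encoded as (register << 4) | index, matching the value
   returned by parse_scalar below.  */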
1534 
1535 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1536    have enough information to be able to do a good job bounds-checking. So, we
1537    just do easy checks here, and do further checks later.  */
1538 
1539 static int
1540 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1541 {
1542   int reg;
1543   char *str = *ccp;
1544   struct neon_typed_alias atype;
1545 
1546   reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1547 
1548   if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1549     return FAIL;
1550 
1551   if (atype.index == NEON_ALL_LANES)
1552     {
1553       first_error (_("scalar must have an index"));
1554       return FAIL;
1555     }
1556   else if (atype.index >= 64 / elsize)
1557     {
1558       first_error (_("scalar index out of range"));
1559       return FAIL;
1560     }
1561 
1562   if (type)
1563     *type = atype.eltype;
1564 
1565   *ccp = str;
1566 
1567   return reg * 16 + atype.index;
1568 }
1569 
1570 /* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
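/* For example, "{r0, r2-r4}" yields the mask 0x1d (bits 0, 2, 3 and 4).  */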
1571 
1572 static long
1573 parse_reg_list (char ** strp)
1574 {
1575   char * str = * strp;
1576   long	 range = 0;
1577   int	 another_range;
1578 
1579   /* We come back here if we get ranges concatenated by '+' or '|'.  */
1580   do
1581     {
1582       another_range = 0;
1583 
1584       if (*str == '{')
1585 	{
1586 	  int in_range = 0;
1587 	  int cur_reg = -1;
1588 
1589 	  str++;
1590 	  do
1591 	    {
1592 	      int reg;
1593 
1594 	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1595 		{
1596 		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1597 		  return FAIL;
1598 		}
1599 
1600 	      if (in_range)
1601 		{
1602 		  int i;
1603 
1604 		  if (reg <= cur_reg)
1605 		    {
1606 		      first_error (_("bad range in register list"));
1607 		      return FAIL;
1608 		    }
1609 
1610 		  for (i = cur_reg + 1; i < reg; i++)
1611 		    {
1612 		      if (range & (1 << i))
1613 			as_tsktsk
1614 			  (_("Warning: duplicated register (r%d) in register list"),
1615 			   i);
1616 		      else
1617 			range |= 1 << i;
1618 		    }
1619 		  in_range = 0;
1620 		}
1621 
1622 	      if (range & (1 << reg))
1623 		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1624 			   reg);
1625 	      else if (reg <= cur_reg)
1626 		as_tsktsk (_("Warning: register range not in ascending order"));
1627 
1628 	      range |= 1 << reg;
1629 	      cur_reg = reg;
1630 	    }
1631 	  while (skip_past_comma (&str) != FAIL
1632 		 || (in_range = 1, *str++ == '-'));
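	  /* The loop condition consumed one character beyond the register
	     list (it was neither ',' nor '-'); back up over it.  */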
1633 	  str--;
1634 
1635 	  if (*str++ != '}')
1636 	    {
1637 	      first_error (_("missing `}'"));
1638 	      return FAIL;
1639 	    }
1640 	}
1641       else
1642 	{
1643 	  expressionS exp;
1644 
1645 	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1646 	    return FAIL;
1647 
1648 	  if (exp.X_op == O_constant)
1649 	    {
1650 	      if (exp.X_add_number
1651 		  != (exp.X_add_number & 0x0000ffff))
1652 		{
1653 		  inst.error = _("invalid register mask");
1654 		  return FAIL;
1655 		}
1656 
1657 	      if ((range & exp.X_add_number) != 0)
1658 		{
1659 		  int regno = range & exp.X_add_number;
1660 
1661 		  regno &= -regno;
1662 		  regno = (1 << regno) - 1;
1663 		  as_tsktsk
1664 		    (_("Warning: duplicated register (r%d) in register list"),
1665 		     regno);
1666 		}
1667 
1668 	      range |= exp.X_add_number;
1669 	    }
1670 	  else
1671 	    {
1672 	      if (inst.reloc.type != 0)
1673 		{
1674 		  inst.error = _("expression too complex");
1675 		  return FAIL;
1676 		}
1677 
1678 	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1679 	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
1680 	      inst.reloc.pc_rel = 0;
1681 	    }
1682 	}
1683 
1684       if (*str == '|' || *str == '+')
1685 	{
1686 	  str++;
1687 	  another_range = 1;
1688 	}
1689     }
1690   while (another_range);
1691 
1692   *strp = str;
1693   return range;
1694 }
1695 
1696 /* Types of registers in a list.  */
1697 
1698 enum reg_list_els
1699 {
1700   REGLIST_VFP_S,
1701   REGLIST_VFP_D,
1702   REGLIST_NEON_D
1703 };
1704 
1705 /* Parse a VFP register list.  If the string is invalid return FAIL.
1706    Otherwise return the number of registers, and set PBASE to the first
1707    register.  Parses registers of type ETYPE.
1708    If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1709      - Q registers can be used to specify pairs of D registers
1710      - { } can be omitted from around a singleton register list
1711          FIXME: This is not implemented, as it would require backtracking in
1712          some cases, e.g.:
1713            vtbl.8 d3,d4,d5
1714          This could be done (the meaning isn't really ambiguous), but doesn't
1715          fit in well with the current parsing framework.
1716      - 32 D registers may be used (also true for VFPv3).
1717    FIXME: Types are ignored in these register lists, which is probably a
1718    bug.  */
1719 
1720 static int
1721 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1722 {
1723   char *str = *ccp;
1724   int base_reg;
1725   int new_base;
1726   enum arm_reg_type regtype = (enum arm_reg_type) 0;
1727   int max_regs = 0;
1728   int count = 0;
1729   int warned = 0;
1730   unsigned long mask = 0;
1731   int i;
1732 
1733   if (*str != '{')
1734     {
1735       inst.error = _("expecting {");
1736       return FAIL;
1737     }
1738 
1739   str++;
1740 
1741   switch (etype)
1742     {
1743     case REGLIST_VFP_S:
1744       regtype = REG_TYPE_VFS;
1745       max_regs = 32;
1746       break;
1747 
1748     case REGLIST_VFP_D:
1749       regtype = REG_TYPE_VFD;
1750       break;
1751 
1752     case REGLIST_NEON_D:
1753       regtype = REG_TYPE_NDQ;
1754       break;
1755     }
1756 
1757   if (etype != REGLIST_VFP_S)
1758     {
1759       /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1760       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1761         {
1762           max_regs = 32;
1763           if (thumb_mode)
1764             ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1765                                     fpu_vfp_ext_d32);
1766           else
1767             ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1768                                     fpu_vfp_ext_d32);
1769         }
1770       else
1771         max_regs = 16;
1772     }
1773 
1774   base_reg = max_regs;
1775 
1776   do
1777     {
1778       int setmask = 1, addregs = 1;
1779 
1780       new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1781 
1782       if (new_base == FAIL)
1783 	{
1784 	  first_error (_(reg_expected_msgs[regtype]));
1785 	  return FAIL;
1786 	}
1787 
1788       if (new_base >= max_regs)
1789         {
1790           first_error (_("register out of range in list"));
1791           return FAIL;
1792         }
1793 
1794       /* Note: a value of 2 * n is returned for the register Q<n>.  */
1795       if (regtype == REG_TYPE_NQ)
1796         {
1797           setmask = 3;
1798           addregs = 2;
1799         }
1800 
1801       if (new_base < base_reg)
1802 	base_reg = new_base;
1803 
1804       if (mask & (setmask << new_base))
1805 	{
1806 	  first_error (_("invalid register list"));
1807 	  return FAIL;
1808 	}
1809 
1810       if ((mask >> new_base) != 0 && ! warned)
1811 	{
1812 	  as_tsktsk (_("register list not in ascending order"));
1813 	  warned = 1;
1814 	}
1815 
1816       mask |= setmask << new_base;
1817       count += addregs;
1818 
1819       if (*str == '-') /* We have the start of a range expression */
1820 	{
1821 	  int high_range;
1822 
1823 	  str++;
1824 
1825 	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1826               == FAIL)
1827 	    {
1828 	      inst.error = gettext (reg_expected_msgs[regtype]);
1829 	      return FAIL;
1830 	    }
1831 
1832           if (high_range >= max_regs)
1833             {
1834               first_error (_("register out of range in list"));
1835               return FAIL;
1836             }
1837 
1838           if (regtype == REG_TYPE_NQ)
1839             high_range = high_range + 1;
1840 
1841 	  if (high_range <= new_base)
1842 	    {
1843 	      inst.error = _("register range not in ascending order");
1844 	      return FAIL;
1845 	    }
1846 
1847 	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
1848 	    {
1849 	      if (mask & (setmask << new_base))
1850 		{
1851 		  inst.error = _("invalid register list");
1852 		  return FAIL;
1853 		}
1854 
1855 	      mask |= setmask << new_base;
1856 	      count += addregs;
1857 	    }
1858 	}
1859     }
1860   while (skip_past_comma (&str) != FAIL);
1861 
1862   str++;
1863 
1864   /* Sanity check -- should have raised a parse error above.  */
1865   if (count == 0 || count > max_regs)
1866     abort ();
1867 
1868   *pbase = base_reg;
1869 
1870   /* Final test -- the registers must be consecutive.  */
1871   mask >>= base_reg;
1872   for (i = 0; i < count; i++)
1873     {
1874       if ((mask & (1u << i)) == 0)
1875 	{
1876 	  inst.error = _("non-contiguous register range");
1877 	  return FAIL;
1878 	}
1879     }
1880 
1881   *ccp = str;
1882 
1883   return count;
1884 }
1885 
1886 /* True if two alias types are the same.  */
1887 
1888 static bfd_boolean
1889 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1890 {
1891   if (!a && !b)
1892     return TRUE;
1893 
1894   if (!a || !b)
1895     return FALSE;
1896 
1897   if (a->defined != b->defined)
1898     return FALSE;
1899 
1900   if ((a->defined & NTA_HASTYPE) != 0
1901       && (a->eltype.type != b->eltype.type
1902           || a->eltype.size != b->eltype.size))
1903     return FALSE;
1904 
1905   if ((a->defined & NTA_HASINDEX) != 0
1906       && (a->index != b->index))
1907     return FALSE;
1908 
1909   return TRUE;
1910 }
1911 
1912 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1913    The base register is put in *PBASE.
1914    The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1915    the return value.
1916    The register stride (minus one) is put in bit 4 of the return value.
1917    Bits [6:5] encode the list length (minus one).
1918    The type of the list elements is put in *ELTYPE, if non-NULL.  */
1919 
1920 #define NEON_LANE(X)		((X) & 0xf)
1921 #define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
1922 #define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
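/* Illustrative example, assuming the encoding described above:
   "{d0[2], d1[2]}" parses to lane 2, stride 1 and length 2, i.e. a
   return value of 0x22, from which NEON_LANE, NEON_REG_STRIDE and
   NEON_REGLIST_LENGTH recover 2, 1 and 2 respectively.  */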
1923 
1924 static int
1925 parse_neon_el_struct_list (char **str, unsigned *pbase,
1926                            struct neon_type_el *eltype)
1927 {
1928   char *ptr = *str;
1929   int base_reg = -1;
1930   int reg_incr = -1;
1931   int count = 0;
1932   int lane = -1;
1933   int leading_brace = 0;
1934   enum arm_reg_type rtype = REG_TYPE_NDQ;
1935   const char *const incr_error = _("register stride must be 1 or 2");
1936   const char *const type_error = _("mismatched element/structure types in list");
1937   struct neon_typed_alias firsttype;
1938 
1939   if (skip_past_char (&ptr, '{') == SUCCESS)
1940     leading_brace = 1;
1941 
1942   do
1943     {
1944       struct neon_typed_alias atype;
1945       int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1946 
1947       if (getreg == FAIL)
1948         {
1949           first_error (_(reg_expected_msgs[rtype]));
1950           return FAIL;
1951         }
1952 
1953       if (base_reg == -1)
1954         {
1955           base_reg = getreg;
1956           if (rtype == REG_TYPE_NQ)
1957             {
1958               reg_incr = 1;
1959             }
1960           firsttype = atype;
1961         }
1962       else if (reg_incr == -1)
1963         {
1964           reg_incr = getreg - base_reg;
1965           if (reg_incr < 1 || reg_incr > 2)
1966             {
1967               first_error (_(incr_error));
1968               return FAIL;
1969             }
1970         }
1971       else if (getreg != base_reg + reg_incr * count)
1972         {
1973           first_error (_(incr_error));
1974           return FAIL;
1975         }
1976 
1977       if (! neon_alias_types_same (&atype, &firsttype))
1978         {
1979           first_error (_(type_error));
1980           return FAIL;
1981         }
1982 
1983       /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1984          modes.  */
1985       if (ptr[0] == '-')
1986         {
1987           struct neon_typed_alias htype;
1988           int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1989           if (lane == -1)
1990             lane = NEON_INTERLEAVE_LANES;
1991           else if (lane != NEON_INTERLEAVE_LANES)
1992             {
1993               first_error (_(type_error));
1994               return FAIL;
1995             }
1996           if (reg_incr == -1)
1997             reg_incr = 1;
1998           else if (reg_incr != 1)
1999             {
2000               first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2001               return FAIL;
2002             }
2003           ptr++;
2004           hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2005           if (hireg == FAIL)
2006             {
2007               first_error (_(reg_expected_msgs[rtype]));
2008               return FAIL;
2009             }
2010           if (! neon_alias_types_same (&htype, &firsttype))
2011             {
2012               first_error (_(type_error));
2013               return FAIL;
2014             }
2015           count += hireg + dregs - getreg;
2016           continue;
2017         }
2018 
2019       /* If we're using Q registers, we can't use [] or [n] syntax.  */
2020       if (rtype == REG_TYPE_NQ)
2021         {
2022           count += 2;
2023           continue;
2024         }
2025 
2026       if ((atype.defined & NTA_HASINDEX) != 0)
2027         {
2028           if (lane == -1)
2029             lane = atype.index;
2030           else if (lane != atype.index)
2031             {
2032               first_error (_(type_error));
2033               return FAIL;
2034             }
2035         }
2036       else if (lane == -1)
2037         lane = NEON_INTERLEAVE_LANES;
2038       else if (lane != NEON_INTERLEAVE_LANES)
2039         {
2040           first_error (_(type_error));
2041           return FAIL;
2042         }
2043       count++;
2044     }
2045   while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2046 
2047   /* No lane set by [x]. We must be interleaving structures.  */
2048   if (lane == -1)
2049     lane = NEON_INTERLEAVE_LANES;
2050 
2051   /* Sanity check.  */
2052   if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2053       || (count > 1 && reg_incr == -1))
2054     {
2055       first_error (_("error parsing element/structure list"));
2056       return FAIL;
2057     }
2058 
2059   if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2060     {
2061       first_error (_("expected }"));
2062       return FAIL;
2063     }
2064 
2065   if (reg_incr == -1)
2066     reg_incr = 1;
2067 
2068   if (eltype)
2069     *eltype = firsttype.eltype;
2070 
2071   *pbase = base_reg;
2072   *str = ptr;
2073 
2074   return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2075 }
2076 
2077 /* Parse an explicit relocation suffix on an expression.  This is
2078    either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2079    arm_reloc_hsh contains no entries, so this function can only
2080    succeed if there is no () after the word.  Returns -1 on error,
2081    BFD_RELOC_UNUSED if there wasn't any suffix.	 */
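/* For illustration: on an ELF target an operand written as "sym(got)"
   is handled by letting the expression parser stop at "sym", after
   which parse_reloc consumes "(got)" and returns whatever reloc code
   arm_reloc_hsh associates with that name; with no suffix it returns
   BFD_RELOC_UNUSED and the caller emits a plain value.  */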
2082 
2083 static int
2084 parse_reloc (char **str)
2085 {
2086   struct reloc_entry *r;
2087   char *p, *q;
2088 
2089   if (**str != '(')
2090     return BFD_RELOC_UNUSED;
2091 
2092   p = *str + 1;
2093   q = p;
2094 
2095   while (*q && *q != ')' && *q != ',')
2096     q++;
2097   if (*q != ')')
2098     return -1;
2099 
2100   if ((r = (struct reloc_entry *)
2101        hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2102     return -1;
2103 
2104   *str = q + 1;
2105   return r->reloc;
2106 }
2107 
2108 /* Directives: register aliases.  */
2109 
2110 static struct reg_entry *
2111 insert_reg_alias (char *str, unsigned number, int type)
2112 {
2113   struct reg_entry *new_reg;
2114   const char *name;
2115 
2116   if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2117     {
2118       if (new_reg->builtin)
2119 	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2120 
2121       /* Only warn about a redefinition if it's not defined as the
2122 	 same register.	 */
2123       else if (new_reg->number != number || new_reg->type != type)
2124 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2125 
2126       return NULL;
2127     }
2128 
2129   name = xstrdup (str);
2130   new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2131 
2132   new_reg->name = name;
2133   new_reg->number = number;
2134   new_reg->type = type;
2135   new_reg->builtin = FALSE;
2136   new_reg->neon = NULL;
2137 
2138   if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2139     abort ();
2140 
2141   return new_reg;
2142 }
2143 
2144 static void
2145 insert_neon_reg_alias (char *str, int number, int type,
2146                        struct neon_typed_alias *atype)
2147 {
2148   struct reg_entry *reg = insert_reg_alias (str, number, type);
2149 
2150   if (!reg)
2151     {
2152       first_error (_("attempt to redefine typed alias"));
2153       return;
2154     }
2155 
2156   if (atype)
2157     {
2158       reg->neon = (struct neon_typed_alias *)
2159           xmalloc (sizeof (struct neon_typed_alias));
2160       *reg->neon = *atype;
2161     }
2162 }
2163 
2164 /* Look for the .req directive.	 This is of the form:
2165 
2166 	new_register_name .req existing_register_name
2167 
2168    If we find one, or if it looks sufficiently like one that we want to
2169    handle any error here, return TRUE.  Otherwise return FALSE.  */
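/* For instance, on a case-insensitive target the directive
     Loop_count .req r5
   creates the alias as written plus the LOOP_COUNT and loop_count
   variants, all naming r5 (see the case handling below).  */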
2170 
2171 static bfd_boolean
2172 create_register_alias (char * newname, char *p)
2173 {
2174   struct reg_entry *old;
2175   char *oldname, *nbuf;
2176   size_t nlen;
2177 
2178   /* The input scrubber ensures that whitespace after the mnemonic is
2179      collapsed to single spaces.  */
2180   oldname = p;
2181   if (strncmp (oldname, " .req ", 6) != 0)
2182     return FALSE;
2183 
2184   oldname += 6;
2185   if (*oldname == '\0')
2186     return FALSE;
2187 
2188   old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2189   if (!old)
2190     {
2191       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2192       return TRUE;
2193     }
2194 
2195   /* If TC_CASE_SENSITIVE is defined, then newname already points to
2196      the desired alias name, and p points to its end.  If not, then
2197      the desired alias name is in the global original_case_string.  */
2198 #ifdef TC_CASE_SENSITIVE
2199   nlen = p - newname;
2200 #else
2201   newname = original_case_string;
2202   nlen = strlen (newname);
2203 #endif
2204 
2205   nbuf = (char *) alloca (nlen + 1);
2206   memcpy (nbuf, newname, nlen);
2207   nbuf[nlen] = '\0';
2208 
2209   /* Create aliases under the new name as stated; an all-lowercase
2210      version of the new name; and an all-uppercase version of the new
2211      name.  */
2212   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2213     {
2214       for (p = nbuf; *p; p++)
2215 	*p = TOUPPER (*p);
2216 
2217       if (strncmp (nbuf, newname, nlen))
2218 	{
2219 	  /* If this attempt to create an additional alias fails, do not bother
2220 	     trying to create the all-lower case alias.  We will fail and issue
2221 	     a second, duplicate error message.  This situation arises when the
2222 	     programmer does something like:
2223 	       foo .req r0
2224 	       Foo .req r1
2225 	     The second .req creates the "Foo" alias but then fails to create
2226 	     the artificial FOO alias because it has already been created by the
2227 	     first .req.  */
2228 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2229 	    return TRUE;
2230 	}
2231 
2232       for (p = nbuf; *p; p++)
2233 	*p = TOLOWER (*p);
2234 
2235       if (strncmp (nbuf, newname, nlen))
2236 	insert_reg_alias (nbuf, old->number, old->type);
2237     }
2238 
2239   return TRUE;
2240 }
2241 
2242 /* Create a Neon typed/indexed register alias using directives, e.g.:
2243      X .dn d5.s32[1]
2244      Y .qn 6.s16
2245      Z .dn d7
2246      T .dn Z[0]
2247    These typed registers can be used instead of the types specified after the
2248    Neon mnemonic, so long as all operands given have types. Types can also be
2249    specified directly, e.g.:
2250      vadd d0.s32, d1.s32, d2.s32  */
2251 
2252 static bfd_boolean
2253 create_neon_reg_alias (char *newname, char *p)
2254 {
2255   enum arm_reg_type basetype;
2256   struct reg_entry *basereg;
2257   struct reg_entry mybasereg;
2258   struct neon_type ntype;
2259   struct neon_typed_alias typeinfo;
2260   char *namebuf, *nameend ATTRIBUTE_UNUSED;
2261   int namelen;
2262 
2263   typeinfo.defined = 0;
2264   typeinfo.eltype.type = NT_invtype;
2265   typeinfo.eltype.size = -1;
2266   typeinfo.index = -1;
2267 
2268   nameend = p;
2269 
2270   if (strncmp (p, " .dn ", 5) == 0)
2271     basetype = REG_TYPE_VFD;
2272   else if (strncmp (p, " .qn ", 5) == 0)
2273     basetype = REG_TYPE_NQ;
2274   else
2275     return FALSE;
2276 
2277   p += 5;
2278 
2279   if (*p == '\0')
2280     return FALSE;
2281 
2282   basereg = arm_reg_parse_multi (&p);
2283 
2284   if (basereg && basereg->type != basetype)
2285     {
2286       as_bad (_("bad type for register"));
2287       return FALSE;
2288     }
2289 
2290   if (basereg == NULL)
2291     {
2292       expressionS exp;
2293       /* Try parsing as an integer.  */
2294       my_get_expression (&exp, &p, GE_NO_PREFIX);
2295       if (exp.X_op != O_constant)
2296         {
2297           as_bad (_("expression must be constant"));
2298           return FALSE;
2299         }
2300       basereg = &mybasereg;
2301       basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2302                                                   : exp.X_add_number;
2303       basereg->neon = 0;
2304     }
2305 
2306   if (basereg->neon)
2307     typeinfo = *basereg->neon;
2308 
2309   if (parse_neon_type (&ntype, &p) == SUCCESS)
2310     {
2311       /* We got a type.  */
2312       if (typeinfo.defined & NTA_HASTYPE)
2313         {
2314           as_bad (_("can't redefine the type of a register alias"));
2315           return FALSE;
2316         }
2317 
2318       typeinfo.defined |= NTA_HASTYPE;
2319       if (ntype.elems != 1)
2320         {
2321           as_bad (_("you must specify a single type only"));
2322           return FALSE;
2323         }
2324       typeinfo.eltype = ntype.el[0];
2325     }
2326 
2327   if (skip_past_char (&p, '[') == SUCCESS)
2328     {
2329       expressionS exp;
2330       /* We got a scalar index.  */
2331 
2332       if (typeinfo.defined & NTA_HASINDEX)
2333         {
2334           as_bad (_("can't redefine the index of a scalar alias"));
2335           return FALSE;
2336         }
2337 
2338       my_get_expression (&exp, &p, GE_NO_PREFIX);
2339 
2340       if (exp.X_op != O_constant)
2341         {
2342           as_bad (_("scalar index must be constant"));
2343           return FALSE;
2344         }
2345 
2346       typeinfo.defined |= NTA_HASINDEX;
2347       typeinfo.index = exp.X_add_number;
2348 
2349       if (skip_past_char (&p, ']') == FAIL)
2350         {
2351           as_bad (_("expecting ]"));
2352           return FALSE;
2353         }
2354     }
2355 
2356   /* If TC_CASE_SENSITIVE is defined, then newname already points to
2357      the desired alias name, and p points to its end.  If not, then
2358      the desired alias name is in the global original_case_string.  */
2359 #ifdef TC_CASE_SENSITIVE
2360   namelen = nameend - newname;
2361 #else
2362   newname = original_case_string;
2363   namelen = strlen (newname);
2364 #endif
2365 
2366   namebuf = (char *) alloca (namelen + 1);
2367   strncpy (namebuf, newname, namelen);
2368   namebuf[namelen] = '\0';
2369 
2370   insert_neon_reg_alias (namebuf, basereg->number, basetype,
2371                          typeinfo.defined != 0 ? &typeinfo : NULL);
2372 
2373   /* Insert name in all uppercase.  */
2374   for (p = namebuf; *p; p++)
2375     *p = TOUPPER (*p);
2376 
2377   if (strncmp (namebuf, newname, namelen))
2378     insert_neon_reg_alias (namebuf, basereg->number, basetype,
2379                            typeinfo.defined != 0 ? &typeinfo : NULL);
2380 
2381   /* Insert name in all lowercase.  */
2382   for (p = namebuf; *p; p++)
2383     *p = TOLOWER (*p);
2384 
2385   if (strncmp (namebuf, newname, namelen))
2386     insert_neon_reg_alias (namebuf, basereg->number, basetype,
2387                            typeinfo.defined != 0 ? &typeinfo : NULL);
2388 
2389   return TRUE;
2390 }
2391 
2392 /* Should never be called, as .req goes between the alias and the
2393    register name, not at the beginning of the line.  */
2394 
2395 static void
2396 s_req (int a ATTRIBUTE_UNUSED)
2397 {
2398   as_bad (_("invalid syntax for .req directive"));
2399 }
2400 
2401 static void
2402 s_dn (int a ATTRIBUTE_UNUSED)
2403 {
2404   as_bad (_("invalid syntax for .dn directive"));
2405 }
2406 
2407 static void
2408 s_qn (int a ATTRIBUTE_UNUSED)
2409 {
2410   as_bad (_("invalid syntax for .qn directive"));
2411 }
2412 
2413 /* The .unreq directive deletes an alias which was previously defined
2414    by .req.  For example:
2415 
2416        my_alias .req r11
2417        .unreq my_alias	  */
2418 
2419 static void
2420 s_unreq (int a ATTRIBUTE_UNUSED)
2421 {
2422   char * name;
2423   char saved_char;
2424 
2425   name = input_line_pointer;
2426 
2427   while (*input_line_pointer != 0
2428 	 && *input_line_pointer != ' '
2429 	 && *input_line_pointer != '\n')
2430     ++input_line_pointer;
2431 
2432   saved_char = *input_line_pointer;
2433   *input_line_pointer = 0;
2434 
2435   if (!*name)
2436     as_bad (_("invalid syntax for .unreq directive"));
2437   else
2438     {
2439       struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2440                                                               name);
2441 
2442       if (!reg)
2443 	as_bad (_("unknown register alias '%s'"), name);
2444       else if (reg->builtin)
2445 	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2446 		 name);
2447       else
2448 	{
2449 	  char * p;
2450 	  char * nbuf;
2451 
2452 	  hash_delete (arm_reg_hsh, name, FALSE);
2453 	  free ((char *) reg->name);
2454           if (reg->neon)
2455             free (reg->neon);
2456 	  free (reg);
2457 
2458 	  /* Also locate the all upper case and all lower case versions.
2459 	     Do not complain if we cannot find one or the other as it
2460 	     was probably deleted above.  */
2461 
2462 	  nbuf = strdup (name);
2463 	  for (p = nbuf; *p; p++)
2464 	    *p = TOUPPER (*p);
2465 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2466 	  if (reg)
2467 	    {
2468 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2469 	      free ((char *) reg->name);
2470 	      if (reg->neon)
2471 		free (reg->neon);
2472 	      free (reg);
2473 	    }
2474 
2475 	  for (p = nbuf; *p; p++)
2476 	    *p = TOLOWER (*p);
2477 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2478 	  if (reg)
2479 	    {
2480 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2481 	      free ((char *) reg->name);
2482 	      if (reg->neon)
2483 		free (reg->neon);
2484 	      free (reg);
2485 	    }
2486 
2487 	  free (nbuf);
2488 	}
2489     }
2490 
2491   *input_line_pointer = saved_char;
2492   demand_empty_rest_of_line ();
2493 }
2494 
2495 /* Directives: Instruction set selection.  */
2496 
2497 #ifdef OBJ_ELF
2498 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2499    (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2500    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2501    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
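/* Sketch of the effect (illustrative only): a fragment such as

	.text
	nop			@ ARM code: $a emitted at this offset
	.thumb
	nop			@ Thumb code: $t emitted here
	.word	0x12345678	@ data: $d emitted here

   would typically receive one mapping symbol per state transition.  */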
2502 
2503 /* Create a new mapping symbol for the transition to STATE.  */
2504 
2505 static void
2506 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2507 {
2508   symbolS * symbolP;
2509   const char * symname;
2510   int type;
2511 
2512   switch (state)
2513     {
2514     case MAP_DATA:
2515       symname = "$d";
2516       type = BSF_NO_FLAGS;
2517       break;
2518     case MAP_ARM:
2519       symname = "$a";
2520       type = BSF_NO_FLAGS;
2521       break;
2522     case MAP_THUMB:
2523       symname = "$t";
2524       type = BSF_NO_FLAGS;
2525       break;
2526     default:
2527       abort ();
2528     }
2529 
2530   symbolP = symbol_new (symname, now_seg, value, frag);
2531   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2532 
2533   switch (state)
2534     {
2535     case MAP_ARM:
2536       THUMB_SET_FUNC (symbolP, 0);
2537       ARM_SET_THUMB (symbolP, 0);
2538       ARM_SET_INTERWORK (symbolP, support_interwork);
2539       break;
2540 
2541     case MAP_THUMB:
2542       THUMB_SET_FUNC (symbolP, 1);
2543       ARM_SET_THUMB (symbolP, 1);
2544       ARM_SET_INTERWORK (symbolP, support_interwork);
2545       break;
2546 
2547     case MAP_DATA:
2548     default:
2549       break;
2550     }
2551 
2552   /* Save the mapping symbols for future reference.  Also check that
2553      we do not place two mapping symbols at the same offset within a
2554      frag.  We'll handle overlap between frags in
2555      check_mapping_symbols.
2556 
2557      If .fill or another data-filling directive generates zero-sized data,
2558      the mapping symbol for the following code will have the same value
2559      as the one generated for the data filling directive.  In this case,
2560      we replace the old symbol with the new one at the same address.  */
2561   if (value == 0)
2562     {
2563       if (frag->tc_frag_data.first_map != NULL)
2564 	{
2565 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2566 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2567 	}
2568       frag->tc_frag_data.first_map = symbolP;
2569     }
2570   if (frag->tc_frag_data.last_map != NULL)
2571     {
2572       know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2573       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2574 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2575     }
2576   frag->tc_frag_data.last_map = symbolP;
2577 }
2578 
2579 /* We must sometimes convert a region marked as code to data during
2580    code alignment, if an odd number of bytes have to be padded.  The
2581    code mapping symbol is pushed to an aligned address.  */
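/* For instance (illustrative), when two bytes of padding are needed
   before 4-byte-aligned code, the padding at VALUE is marked with a $d
   symbol and a fresh code symbol is placed at VALUE + BYTES, so that
   disassemblers do not decode the filler as an instruction.  */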
2582 
2583 static void
2584 insert_data_mapping_symbol (enum mstate state,
2585 			    valueT value, fragS *frag, offsetT bytes)
2586 {
2587   /* If there was already a mapping symbol, remove it.  */
2588   if (frag->tc_frag_data.last_map != NULL
2589       && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2590     {
2591       symbolS *symp = frag->tc_frag_data.last_map;
2592 
2593       if (value == 0)
2594 	{
2595 	  know (frag->tc_frag_data.first_map == symp);
2596 	  frag->tc_frag_data.first_map = NULL;
2597 	}
2598       frag->tc_frag_data.last_map = NULL;
2599       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2600     }
2601 
2602   make_mapping_symbol (MAP_DATA, value, frag);
2603   make_mapping_symbol (state, value + bytes, frag);
2604 }
2605 
2606 static void mapping_state_2 (enum mstate state, int max_chars);
2607 
2608 /* Set the mapping state to STATE.  Only call this when about to
2609    emit some STATE bytes to the file.  */
2610 
2611 void
2612 mapping_state (enum mstate state)
2613 {
2614   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2615 
2616 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2617 
2618   if (mapstate == state)
2619     /* The mapping symbol has already been emitted.
2620        There is nothing else to do.  */
2621     return;
2622 
2623   if (state == MAP_ARM || state == MAP_THUMB)
2624     /*  PR gas/12931
2625 	All ARM instructions require 4-byte alignment.
2626 	(Almost) all Thumb instructions require 2-byte alignment.
2627 
2628 	When emitting instructions into any section, mark the section
2629 	appropriately.
2630 
2631 	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2632 	but themselves require 2-byte alignment; this applies to some
2633 	PC-relative forms.  However, these cases will involve implicit
2634 	literal pool generation or an explicit .align >=2, both of
2635 	which will cause the section to be marked with sufficient
2636 	alignment.  Thus, we don't handle those cases here.  */
2637     record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2638 
2639   if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2640     /* This case will be evaluated later in the next else.  */
2641     /* This case will be handled by the else-if below on a later call.  */
2642   else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2643           || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2644     {
2645       /* Only add the symbol if the offset is > 0:
2646          if we're at the first frag, check its size > 0;
2647          if we're not at the first frag, then for sure
2648             the offset is > 0.  */
2649       struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2650       const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2651 
2652       if (add_symbol)
2653         make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2654     }
2655 
2656   mapping_state_2 (state, 0);
2657 #undef TRANSITION
2658 }
2659 
2660 /* Same as mapping_state, but MAX_CHARS bytes have already been
2661    allocated.  Put the mapping symbol that far back.  */
2662 
2663 static void
2664 mapping_state_2 (enum mstate state, int max_chars)
2665 {
2666   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2667 
2668   if (!SEG_NORMAL (now_seg))
2669     return;
2670 
2671   if (mapstate == state)
2672     /* The mapping symbol has already been emitted.
2673        There is nothing else to do.  */
2674     return;
2675 
2676   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2677   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2678 }
2679 #else
2680 #define mapping_state(x) ((void)0)
2681 #define mapping_state_2(x, y) ((void)0)
2682 #endif
2683 
2684 /* Find the real, Thumb encoded start of a Thumb function.  */
2685 
2686 #ifdef OBJ_COFF
2687 static symbolS *
2688 find_real_start (symbolS * symbolP)
2689 {
2690   char *       real_start;
2691   const char * name = S_GET_NAME (symbolP);
2692   symbolS *    new_target;
2693 
2694   /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
2695 #define STUB_NAME ".real_start_of"
2696 
2697   if (name == NULL)
2698     abort ();
2699 
2700   /* The compiler may generate BL instructions to local labels because
2701      it needs to perform a branch to a far away location. These labels
2702      do not have a corresponding ".real_start_of" label.  We check
2703      both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2704      the ".real_start_of" convention for nonlocal branches.  */
2705   if (S_IS_LOCAL (symbolP) || name[0] == '.')
2706     return symbolP;
2707 
2708   real_start = ACONCAT ((STUB_NAME, name, NULL));
2709   new_target = symbol_find (real_start);
2710 
2711   if (new_target == NULL)
2712     {
2713       as_warn (_("Failed to find real start of function: %s\n"), name);
2714       new_target = symbolP;
2715     }
2716 
2717   return new_target;
2718 }
2719 #endif
2720 
2721 static void
2722 opcode_select (int width)
2723 {
2724   switch (width)
2725     {
2726     case 16:
2727       if (! thumb_mode)
2728 	{
2729 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2730 	    as_bad (_("selected processor does not support THUMB opcodes"));
2731 
2732 	  thumb_mode = 1;
2733 	  /* No need to force the alignment, since we will have been
2734 	     coming from ARM mode, which is word-aligned.  */
2735 	  record_alignment (now_seg, 1);
2736 	}
2737       break;
2738 
2739     case 32:
2740       if (thumb_mode)
2741 	{
2742 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2743 	    as_bad (_("selected processor does not support ARM opcodes"));
2744 
2745 	  thumb_mode = 0;
2746 
2747 	  if (!need_pass_2)
2748 	    frag_align (2, 0, 0);
2749 
2750 	  record_alignment (now_seg, 1);
2751 	}
2752       break;
2753 
2754     default:
2755       as_bad (_("invalid instruction size selected (%d)"), width);
2756     }
2757 }
2758 
2759 static void
2760 s_arm (int ignore ATTRIBUTE_UNUSED)
2761 {
2762   opcode_select (32);
2763   demand_empty_rest_of_line ();
2764 }
2765 
2766 static void
2767 s_thumb (int ignore ATTRIBUTE_UNUSED)
2768 {
2769   opcode_select (16);
2770   demand_empty_rest_of_line ();
2771 }
2772 
2773 static void
2774 s_code (int unused ATTRIBUTE_UNUSED)
2775 {
2776   int temp;
2777 
2778   temp = get_absolute_expression ();
2779   switch (temp)
2780     {
2781     case 16:
2782     case 32:
2783       opcode_select (temp);
2784       break;
2785 
2786     default:
2787       as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2788     }
2789 }
2790 
2791 static void
2792 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2793 {
2794   /* If we are not already in thumb mode go into it, EVEN if
2795      the target processor does not support thumb instructions.
2796      This is used by gcc/config/arm/lib1funcs.asm for example
2797      to compile interworking support functions even if the
2798      target processor should not support interworking.	*/
2799   if (! thumb_mode)
2800     {
2801       thumb_mode = 2;
2802       record_alignment (now_seg, 1);
2803     }
2804 
2805   demand_empty_rest_of_line ();
2806 }
2807 
2808 static void
2809 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2810 {
2811   s_thumb (0);
2812 
2813   /* The following label is the name/address of the start of a Thumb function.
2814      We need to know this for the interworking support.	 */
2815   label_is_thumb_function_name = TRUE;
2816 }
2817 
2818 /* Perform a .set directive, but also mark the alias as
2819    being a thumb function.  */
2820 
2821 static void
2822 s_thumb_set (int equiv)
2823 {
2824   /* XXX the following is a duplicate of the code for s_set() in read.c
2825      We cannot just call that code as we need to get at the symbol that
2826      is created.  */
2827   char *    name;
2828   char	    delim;
2829   char *    end_name;
2830   symbolS * symbolP;
2831 
2832   /* Especial apologies for the random logic:
2833      This just grew, and could be parsed much more simply!
2834      Dean - in haste.  */
2835   name	    = input_line_pointer;
2836   delim	    = get_symbol_end ();
2837   end_name  = input_line_pointer;
2838   *end_name = delim;
2839 
2840   if (*input_line_pointer != ',')
2841     {
2842       *end_name = 0;
2843       as_bad (_("expected comma after name \"%s\""), name);
2844       *end_name = delim;
2845       ignore_rest_of_line ();
2846       return;
2847     }
2848 
2849   input_line_pointer++;
2850   *end_name = 0;
2851 
2852   if (name[0] == '.' && name[1] == '\0')
2853     {
2854       /* XXX - this should not happen to .thumb_set.  */
2855       abort ();
2856     }
2857 
2858   if ((symbolP = symbol_find (name)) == NULL
2859       && (symbolP = md_undefined_symbol (name)) == NULL)
2860     {
2861 #ifndef NO_LISTING
2862       /* When doing symbol listings, play games with dummy fragments living
2863 	 outside the normal fragment chain to record the file and line info
2864 	 for this symbol.  */
2865       if (listing & LISTING_SYMBOLS)
2866 	{
2867 	  extern struct list_info_struct * listing_tail;
2868 	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2869 
2870 	  memset (dummy_frag, 0, sizeof (fragS));
2871 	  dummy_frag->fr_type = rs_fill;
2872 	  dummy_frag->line = listing_tail;
2873 	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2874 	  dummy_frag->fr_symbol = symbolP;
2875 	}
2876       else
2877 #endif
2878 	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2879 
2880 #ifdef OBJ_COFF
2881       /* "set" symbols are local unless otherwise specified.  */
2882       SF_SET_LOCAL (symbolP);
2883 #endif /* OBJ_COFF  */
2884     }				/* Make a new symbol.  */
2885 
2886   symbol_table_insert (symbolP);
2887 
2888   * end_name = delim;
2889 
2890   if (equiv
2891       && S_IS_DEFINED (symbolP)
2892       && S_GET_SEGMENT (symbolP) != reg_section)
2893     as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2894 
2895   pseudo_set (symbolP);
2896 
2897   demand_empty_rest_of_line ();
2898 
2899   /* XXX Now we come to the Thumb specific bit of code.	 */
2900 
2901   THUMB_SET_FUNC (symbolP, 1);
2902   ARM_SET_THUMB (symbolP, 1);
2903 #if defined OBJ_ELF || defined OBJ_COFF
2904   ARM_SET_INTERWORK (symbolP, support_interwork);
2905 #endif
2906 }
2907 
2908 /* Directives: Mode selection.  */
2909 
2910 /* .syntax [unified|divided] - choose the new unified syntax
2911    (same for Arm and Thumb encoding, modulo slight differences in what
2912    can be represented) or the old divergent syntax for each mode.  */
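/* For example, after ".syntax unified" a single spelling such as
   "push {r4, lr}" assembles in both ARM and Thumb state, whereas
   ".syntax divided" restores the older per-mode syntaxes.  */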
2913 static void
2914 s_syntax (int unused ATTRIBUTE_UNUSED)
2915 {
2916   char *name, delim;
2917 
2918   name = input_line_pointer;
2919   delim = get_symbol_end ();
2920 
2921   if (!strcasecmp (name, "unified"))
2922     unified_syntax = TRUE;
2923   else if (!strcasecmp (name, "divided"))
2924     unified_syntax = FALSE;
2925   else
2926     {
2927       as_bad (_("unrecognized syntax mode \"%s\""), name);
2928       return;
2929     }
2930   *input_line_pointer = delim;
2931   demand_empty_rest_of_line ();
2932 }
2933 
2934 /* Directives: sectioning and alignment.  */
2935 
2936 /* Same as s_align_ptwo but align 0 => align 2.	 */
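/* Illustration: on ARM the argument is a power of two, so ".align 3"
   aligns to an 8-byte boundary and a bare ".align" (argument 0) is
   promoted to ".align 2", i.e. 4-byte alignment.  */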
2937 
2938 static void
2939 s_align (int unused ATTRIBUTE_UNUSED)
2940 {
2941   int temp;
2942   bfd_boolean fill_p;
2943   long temp_fill;
2944   long max_alignment = 15;
2945 
2946   temp = get_absolute_expression ();
2947   if (temp > max_alignment)
2948     as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2949   else if (temp < 0)
2950     {
2951       as_bad (_("alignment negative. 0 assumed."));
2952       temp = 0;
2953     }
2954 
2955   if (*input_line_pointer == ',')
2956     {
2957       input_line_pointer++;
2958       temp_fill = get_absolute_expression ();
2959       fill_p = TRUE;
2960     }
2961   else
2962     {
2963       fill_p = FALSE;
2964       temp_fill = 0;
2965     }
2966 
2967   if (!temp)
2968     temp = 2;
2969 
2970   /* Only make a frag if we HAVE to.  */
2971   if (temp && !need_pass_2)
2972     {
2973       if (!fill_p && subseg_text_p (now_seg))
2974 	frag_align_code (temp, 0);
2975       else
2976 	frag_align (temp, (int) temp_fill, 0);
2977     }
2978   demand_empty_rest_of_line ();
2979 
2980   record_alignment (now_seg, temp);
2981 }
2982 
2983 static void
2984 s_bss (int ignore ATTRIBUTE_UNUSED)
2985 {
2986   /* We don't support putting frags in the BSS segment, we fake it by
2987      marking in_bss, then looking at s_skip for clues.	*/
2988   subseg_set (bss_section, 0);
2989   demand_empty_rest_of_line ();
2990 
2991 #ifdef md_elf_section_change_hook
2992   md_elf_section_change_hook ();
2993 #endif
2994 }
2995 
2996 static void
2997 s_even (int ignore ATTRIBUTE_UNUSED)
2998 {
2999   /* Never make a frag if we expect an extra pass.  */
3000   if (!need_pass_2)
3001     frag_align (1, 0, 0);
3002 
3003   record_alignment (now_seg, 1);
3004 
3005   demand_empty_rest_of_line ();
3006 }
3007 
3008 /* Directives: Literal pools.  */
3009 
3010 static literal_pool *
3011 find_literal_pool (void)
3012 {
3013   literal_pool * pool;
3014 
3015   for (pool = list_of_pools; pool != NULL; pool = pool->next)
3016     {
3017       if (pool->section == now_seg
3018 	  && pool->sub_section == now_subseg)
3019 	break;
3020     }
3021 
3022   return pool;
3023 }
3024 
3025 static literal_pool *
3026 find_or_make_literal_pool (void)
3027 {
3028   /* Next literal pool ID number.  */
3029   static unsigned int latest_pool_num = 1;
3030   literal_pool *      pool;
3031 
3032   pool = find_literal_pool ();
3033 
3034   if (pool == NULL)
3035     {
3036       /* Create a new pool.  */
3037       pool = (literal_pool *) xmalloc (sizeof (* pool));
3038       if (! pool)
3039 	return NULL;
3040 
3041       pool->next_free_entry = 0;
3042       pool->section	    = now_seg;
3043       pool->sub_section	    = now_subseg;
3044       pool->next	    = list_of_pools;
3045       pool->symbol	    = NULL;
3046 
3047       /* Add it to the list.  */
3048       list_of_pools = pool;
3049     }
3050 
3051   /* New pools, and emptied pools, will have a NULL symbol.  */
3052   if (pool->symbol == NULL)
3053     {
3054       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3055 				    (valueT) 0, &zero_address_frag);
3056       pool->id = latest_pool_num ++;
3057     }
3058 
3059   /* Done.  */
3060   return pool;
3061 }
3062 
3063 /* Add the literal in the global 'inst'
3064    structure to the relevant literal pool.  */
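/* Sketch of a typical path (illustrative): "ldr r0, =0x12345678" ends
   up here with the constant in inst.reloc.exp; if the value is not
   already pooled a new entry is created, and the expression is rewritten
   as pool->symbol + 4 * entry so that the load refers to the word
   emitted later by s_ltorg.  */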
3065 
3066 static int
3067 add_to_lit_pool (void)
3068 {
3069   literal_pool * pool;
3070   unsigned int entry;
3071 
3072   pool = find_or_make_literal_pool ();
3073 
3074   /* Check if this literal value is already in the pool.  */
3075   for (entry = 0; entry < pool->next_free_entry; entry ++)
3076     {
3077       if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3078 	  && (inst.reloc.exp.X_op == O_constant)
3079 	  && (pool->literals[entry].X_add_number
3080 	      == inst.reloc.exp.X_add_number)
3081 	  && (pool->literals[entry].X_unsigned
3082 	      == inst.reloc.exp.X_unsigned))
3083 	break;
3084 
3085       if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3086 	  && (inst.reloc.exp.X_op == O_symbol)
3087 	  && (pool->literals[entry].X_add_number
3088 	      == inst.reloc.exp.X_add_number)
3089 	  && (pool->literals[entry].X_add_symbol
3090 	      == inst.reloc.exp.X_add_symbol)
3091 	  && (pool->literals[entry].X_op_symbol
3092 	      == inst.reloc.exp.X_op_symbol))
3093 	break;
3094     }
3095 
3096   /* Do we need to create a new entry?	*/
3097   if (entry == pool->next_free_entry)
3098     {
3099       if (entry >= MAX_LITERAL_POOL_SIZE)
3100 	{
3101 	  inst.error = _("literal pool overflow");
3102 	  return FAIL;
3103 	}
3104 
3105       pool->literals[entry] = inst.reloc.exp;
3106 #ifdef OBJ_ELF
3107       /* PR ld/12974: Record the location of the first source line to reference
3108 	 this entry in the literal pool.  If it turns out during linking that the
3109 	 symbol does not exist we will be able to give an accurate line number for
3110 	 the (first use of the) missing reference.  */
3111       if (debug_type == DEBUG_DWARF2)
3112 	dwarf2_where (pool->locs + entry);
3113 #endif
3114       pool->next_free_entry += 1;
3115     }
3116 
3117   inst.reloc.exp.X_op	      = O_symbol;
3118   inst.reloc.exp.X_add_number = ((int) entry) * 4;
3119   inst.reloc.exp.X_add_symbol = pool->symbol;
3120 
3121   return SUCCESS;
3122 }
3123 
3124 /* Can't use symbol_new here, so have to create a symbol and then at
3125    a later date assign it a value.  That's what these functions do.  */
3126 
3127 static void
3128 symbol_locate (symbolS *    symbolP,
3129 	       const char * name,	/* It is copied, the caller can modify.	 */
3130 	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
3131 	       valueT	    valu,	/* Symbol value.  */
3132 	       fragS *	    frag)	/* Associated fragment.	 */
3133 {
3134   unsigned int name_length;
3135   char * preserved_copy_of_name;
3136 
3137   name_length = strlen (name) + 1;   /* +1 for \0.  */
3138   obstack_grow (&notes, name, name_length);
3139   preserved_copy_of_name = (char *) obstack_finish (&notes);
3140 
3141 #ifdef tc_canonicalize_symbol_name
3142   preserved_copy_of_name =
3143     tc_canonicalize_symbol_name (preserved_copy_of_name);
3144 #endif
3145 
3146   S_SET_NAME (symbolP, preserved_copy_of_name);
3147 
3148   S_SET_SEGMENT (symbolP, segment);
3149   S_SET_VALUE (symbolP, valu);
3150   symbol_clear_list_pointers (symbolP);
3151 
3152   symbol_set_frag (symbolP, frag);
3153 
3154   /* Link to end of symbol chain.  */
3155   {
3156     extern int symbol_table_frozen;
3157 
3158     if (symbol_table_frozen)
3159       abort ();
3160   }
3161 
3162   symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3163 
3164   obj_symbol_new_hook (symbolP);
3165 
3166 #ifdef tc_symbol_new_hook
3167   tc_symbol_new_hook (symbolP);
3168 #endif
3169 
3170 #ifdef DEBUG_SYMS
3171   verify_symbol_chain (symbol_rootP, symbol_lastP);
3172 #endif /* DEBUG_SYMS  */
3173 }
3174 
3175 
3176 static void
3177 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3178 {
3179   unsigned int entry;
3180   literal_pool * pool;
3181   char sym_name[20];
3182 
3183   pool = find_literal_pool ();
3184   if (pool == NULL
3185       || pool->symbol == NULL
3186       || pool->next_free_entry == 0)
3187     return;
3188 
3189   mapping_state (MAP_DATA);
3190 
3191   /* Align the pool, as it is accessed with word loads.
3192      Only make a frag if we have to.  */
3193   if (!need_pass_2)
3194     frag_align (2, 0, 0);
3195 
3196   record_alignment (now_seg, 2);
3197 
3198   sprintf (sym_name, "$$lit_\002%x", pool->id);
3199 
3200   symbol_locate (pool->symbol, sym_name, now_seg,
3201 		 (valueT) frag_now_fix (), frag_now);
3202   symbol_table_insert (pool->symbol);
3203 
3204   ARM_SET_THUMB (pool->symbol, thumb_mode);
3205 
3206 #if defined OBJ_COFF || defined OBJ_ELF
3207   ARM_SET_INTERWORK (pool->symbol, support_interwork);
3208 #endif
3209 
3210   for (entry = 0; entry < pool->next_free_entry; entry ++)
3211     {
3212 #ifdef OBJ_ELF
3213       if (debug_type == DEBUG_DWARF2)
3214 	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3215 #endif
3216       /* First output the expression in the instruction to the pool.  */
3217       emit_expr (&(pool->literals[entry]), 4); /* .word  */
3218     }
3219 
3220   /* Mark the pool as empty.  */
3221   pool->next_free_entry = 0;
3222   pool->symbol = NULL;
3223 }
3224 
3225 #ifdef OBJ_ELF
3226 /* Forward declarations for functions below, in the MD interface
3227    section.  */
3228 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3229 static valueT create_unwind_entry (int);
3230 static void start_unwind_section (const segT, int);
3231 static void add_unwind_opcode (valueT, int);
3232 static void flush_pending_unwind (void);
3233 
3234 /* Directives: Data.  */
3235 
3236 static void
3237 s_arm_elf_cons (int nbytes)
3238 {
3239   expressionS exp;
3240 
3241 #ifdef md_flush_pending_output
3242   md_flush_pending_output ();
3243 #endif
3244 
3245   if (is_it_end_of_statement ())
3246     {
3247       demand_empty_rest_of_line ();
3248       return;
3249     }
3250 
3251 #ifdef md_cons_align
3252   md_cons_align (nbytes);
3253 #endif
3254 
3255   mapping_state (MAP_DATA);
3256   do
3257     {
3258       int reloc;
3259       char *base = input_line_pointer;
3260 
3261       expression (& exp);
3262 
3263       if (exp.X_op != O_symbol)
3264 	emit_expr (&exp, (unsigned int) nbytes);
3265       else
3266 	{
3267 	  char *before_reloc = input_line_pointer;
3268 	  reloc = parse_reloc (&input_line_pointer);
3269 	  if (reloc == -1)
3270 	    {
3271 	      as_bad (_("unrecognized relocation suffix"));
3272 	      ignore_rest_of_line ();
3273 	      return;
3274 	    }
3275 	  else if (reloc == BFD_RELOC_UNUSED)
3276 	    emit_expr (&exp, (unsigned int) nbytes);
3277 	  else
3278 	    {
3279 	      reloc_howto_type *howto = (reloc_howto_type *)
3280                   bfd_reloc_type_lookup (stdoutput,
3281                                          (bfd_reloc_code_real_type) reloc);
3282 	      int size = bfd_get_reloc_size (howto);
3283 
3284 	      if (reloc == BFD_RELOC_ARM_PLT32)
3285 		{
3286 		  as_bad (_("(plt) is only valid on branch targets"));
3287 		  reloc = BFD_RELOC_UNUSED;
3288 		  size = 0;
3289 		}
3290 
3291 	      if (size > nbytes)
3292 		as_bad (_("%s relocations do not fit in %d bytes"),
3293 			howto->name, nbytes);
3294 	      else
3295 		{
3296 		  /* We've parsed an expression stopping at O_symbol.
3297 		     But there may be more expression left now that we
3298 		     have parsed the relocation marker.  Parse it again.
3299 		     XXX Surely there is a cleaner way to do this.  */
3300 		  char *p = input_line_pointer;
3301 		  int offset;
3302 		  char *save_buf = (char *) alloca (input_line_pointer - base);
3303 		  memcpy (save_buf, base, input_line_pointer - base);
3304 		  memmove (base + (input_line_pointer - before_reloc),
3305 			   base, before_reloc - base);
3306 
3307 		  input_line_pointer = base + (input_line_pointer-before_reloc);
3308 		  expression (&exp);
3309 		  memcpy (base, save_buf, p - base);
3310 
3311 		  offset = nbytes - size;
3312 		  p = frag_more ((int) nbytes);
3313 		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3314 			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3315 		}
3316 	    }
3317 	}
3318     }
3319   while (*input_line_pointer++ == ',');
3320 
3321   /* Put terminator back into stream.  */
3322   input_line_pointer --;
3323   demand_empty_rest_of_line ();
3324 }
3325 
3326 /* Emit an expression containing a 32-bit thumb instruction.
3327    Implementation based on put_thumb32_insn.  */
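/* Illustration: for the 32-bit Thumb encoding 0xf3af8000 (nop.w) the
   high halfword 0xf3af is emitted first, then 0x8000, matching the
   halfword ordering of Thumb-2 instructions in memory.  */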
3328 
3329 static void
3330 emit_thumb32_expr (expressionS * exp)
3331 {
3332   expressionS exp_high = *exp;
3333 
3334   exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3335   emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3336   exp->X_add_number &= 0xffff;
3337   emit_expr (exp, (unsigned int) THUMB_SIZE);
3338 }
3339 
3340 /*  Guess the instruction size based on the opcode.  */
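/* Illustrative values: 0x4770 (bx lr) gives 2; 0xf3af8000 gives 4;
   something like 0xe800, which looks like the first halfword of a
   32-bit encoding, gives 0 (ambiguous).  */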
3341 
3342 static int
3343 thumb_insn_size (int opcode)
3344 {
3345   if ((unsigned int) opcode < 0xe800u)
3346     return 2;
3347   else if ((unsigned int) opcode >= 0xe8000000u)
3348     return 4;
3349   else
3350     return 0;
3351 }
3352 
3353 static bfd_boolean
3354 emit_insn (expressionS *exp, int nbytes)
3355 {
3356   int size = 0;
3357 
3358   if (exp->X_op == O_constant)
3359     {
3360       size = nbytes;
3361 
3362       if (size == 0)
3363 	size = thumb_insn_size (exp->X_add_number);
3364 
3365       if (size != 0)
3366 	{
3367 	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3368 	    {
3369 	      as_bad (_(".inst.n operand too big. "\
3370 			"Use .inst.w instead"));
3371 	      size = 0;
3372 	    }
3373 	  else
3374 	    {
3375 	      if (now_it.state == AUTOMATIC_IT_BLOCK)
3376 		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3377 	      else
3378 		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3379 
3380 	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3381 		emit_thumb32_expr (exp);
3382 	      else
3383 		emit_expr (exp, (unsigned int) size);
3384 
3385 	      it_fsm_post_encode ();
3386 	    }
3387 	}
3388       else
3389 	as_bad (_("cannot determine Thumb instruction size. "	\
3390 		  "Use .inst.n/.inst.w instead"));
3391     }
3392   else
3393     as_bad (_("constant expression required"));
3394 
3395   return (size != 0);
3396 }
3397 
3398 /* Like s_arm_elf_cons but do not use md_cons_align and
3399    set the mapping state to MAP_ARM/MAP_THUMB.  */
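/* For example, in ARM state ".inst 0xe1a00000" emits one 4-byte word
   (mov r0, r0), while in Thumb state ".inst.n 0x46c0" emits the 2-byte
   Thumb nop; width suffixes are rejected in ARM state below.  */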
3400 
3401 static void
3402 s_arm_elf_inst (int nbytes)
3403 {
3404   if (is_it_end_of_statement ())
3405     {
3406       demand_empty_rest_of_line ();
3407       return;
3408     }
3409 
3410   /* Calling mapping_state () here will not change ARM/THUMB,
3411      but will ensure we are not left in the DATA state.  */
3412 
3413   if (thumb_mode)
3414     mapping_state (MAP_THUMB);
3415   else
3416     {
3417       if (nbytes != 0)
3418 	{
3419 	  as_bad (_("width suffixes are invalid in ARM mode"));
3420 	  ignore_rest_of_line ();
3421 	  return;
3422 	}
3423 
3424       nbytes = 4;
3425 
3426       mapping_state (MAP_ARM);
3427     }
3428 
3429   do
3430     {
3431       expressionS exp;
3432 
3433       expression (& exp);
3434 
3435       if (! emit_insn (& exp, nbytes))
3436 	{
3437 	  ignore_rest_of_line ();
3438 	  return;
3439 	}
3440     }
3441   while (*input_line_pointer++ == ',');
3442 
3443   /* Put terminator back into stream.  */
3444   input_line_pointer --;
3445   demand_empty_rest_of_line ();
3446 }
3447 
3448 /* Parse a .rel31 directive.  */
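/* For example, ".rel31 0, handler" reserves a word with bit 31 clear
   and attaches a pc-relative BFD_RELOC_ARM_PREL31 fixup against
   `handler'; writing 1 instead of 0 presets bit 31.  */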
3449 
3450 static void
3451 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3452 {
3453   expressionS exp;
3454   char *p;
3455   valueT highbit;
3456 
3457   highbit = 0;
3458   if (*input_line_pointer == '1')
3459     highbit = 0x80000000;
3460   else if (*input_line_pointer != '0')
3461     as_bad (_("expected 0 or 1"));
3462 
3463   input_line_pointer++;
3464   if (*input_line_pointer != ',')
3465     as_bad (_("missing comma"));
3466   input_line_pointer++;
3467 
3468 #ifdef md_flush_pending_output
3469   md_flush_pending_output ();
3470 #endif
3471 
3472 #ifdef md_cons_align
3473   md_cons_align (4);
3474 #endif
3475 
3476   mapping_state (MAP_DATA);
3477 
3478   expression (&exp);
3479 
3480   p = frag_more (4);
3481   md_number_to_chars (p, highbit, 4);
3482   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3483 	       BFD_RELOC_ARM_PREL31);
3484 
3485   demand_empty_rest_of_line ();
3486 }
3487 
3488 /* Directives: AEABI stack-unwind tables.  */
3489 
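/* A typical, purely illustrative sequence in hand-written assembly:

	.fnstart
	push	{r4, r5, lr}
	.save	{r4, r5, lr}
	sub	sp, sp, #16
	.pad	#16
	...
	.fnend

   The directives below translate such annotations into EHABI unwind
   opcodes and index table entries.  */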
3490 /* Parse an unwind_fnstart directive.  Simply records the current location.  */
3491 
3492 static void
3493 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3494 {
3495   demand_empty_rest_of_line ();
3496   if (unwind.proc_start)
3497     {
3498       as_bad (_("duplicate .fnstart directive"));
3499       return;
3500     }
3501 
3502   /* Mark the start of the function.  */
3503   unwind.proc_start = expr_build_dot ();
3504 
3505   /* Reset the rest of the unwind info.	 */
3506   unwind.opcode_count = 0;
3507   unwind.table_entry = NULL;
3508   unwind.personality_routine = NULL;
3509   unwind.personality_index = -1;
3510   unwind.frame_size = 0;
3511   unwind.fp_offset = 0;
3512   unwind.fp_reg = REG_SP;
3513   unwind.fp_used = 0;
3514   unwind.sp_restored = 0;
3515 }
3516 
3517 
3518 /* Parse a handlerdata directive.  Creates the exception handling table entry
3519    for the function.  */
3520 
3521 static void
3522 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3523 {
3524   demand_empty_rest_of_line ();
3525   if (!unwind.proc_start)
3526     as_bad (MISSING_FNSTART);
3527 
3528   if (unwind.table_entry)
3529     as_bad (_("duplicate .handlerdata directive"));
3530 
3531   create_unwind_entry (1);
3532 }
3533 
3534 /* Parse an unwind_fnend directive.  Generates the index table entry.  */
3535 
3536 static void
3537 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3538 {
3539   long where;
3540   char *ptr;
3541   valueT val;
3542   unsigned int marked_pr_dependency;
3543 
3544   demand_empty_rest_of_line ();
3545 
3546   if (!unwind.proc_start)
3547     {
3548       as_bad (_(".fnend directive without .fnstart"));
3549       return;
3550     }
3551 
3552   /* Add eh table entry.  */
3553   if (unwind.table_entry == NULL)
3554     val = create_unwind_entry (0);
3555   else
3556     val = 0;
3557 
3558   /* Add index table entry.  This is two words.	 */
3559   start_unwind_section (unwind.saved_seg, 1);
3560   frag_align (2, 0, 0);
3561   record_alignment (now_seg, 2);
3562 
3563   ptr = frag_more (8);
3564   memset (ptr, 0, 8);
3565   where = frag_now_fix () - 8;
3566 
3567   /* Self relative offset of the function start.  */
3568   fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3569 	   BFD_RELOC_ARM_PREL31);
3570 
3571   /* Indicate dependency on EHABI-defined personality routines to the
3572      linker, if it hasn't been done already.  */
3573   marked_pr_dependency
3574     = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3575   if (unwind.personality_index >= 0 && unwind.personality_index < 3
3576       && !(marked_pr_dependency & (1 << unwind.personality_index)))
3577     {
3578       static const char *const name[] =
3579 	{
3580 	  "__aeabi_unwind_cpp_pr0",
3581 	  "__aeabi_unwind_cpp_pr1",
3582 	  "__aeabi_unwind_cpp_pr2"
3583 	};
3584       symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3585       fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3586       seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3587 	|= 1 << unwind.personality_index;
3588     }
3589 
3590   if (val)
3591     /* Inline exception table entry.  */
3592     md_number_to_chars (ptr + 4, val, 4);
3593   else
3594     /* Self relative offset of the table entry.	 */
3595     fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3596 	     BFD_RELOC_ARM_PREL31);
3597 
3598   /* Restore the original section.  */
3599   subseg_set (unwind.saved_seg, unwind.saved_subseg);
3600 
3601   unwind.proc_start = NULL;
3602 }
3603 
3604 
3605 /* Parse an unwind_cantunwind directive.  */
3606 
3607 static void
3608 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3609 {
3610   demand_empty_rest_of_line ();
3611   if (!unwind.proc_start)
3612     as_bad (MISSING_FNSTART);
3613 
3614   if (unwind.personality_routine || unwind.personality_index != -1)
3615     as_bad (_("personality routine specified for cantunwind frame"));
3616 
3617   unwind.personality_index = -2;
3618 }
3619 
3620 
3621 /* Parse a personalityindex directive.	*/
3622 
3623 static void
3624 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3625 {
3626   expressionS exp;
3627 
3628   if (!unwind.proc_start)
3629     as_bad (MISSING_FNSTART);
3630 
3631   if (unwind.personality_routine || unwind.personality_index != -1)
3632     as_bad (_("duplicate .personalityindex directive"));
3633 
3634   expression (&exp);
3635 
3636   if (exp.X_op != O_constant
3637       || exp.X_add_number < 0 || exp.X_add_number > 15)
3638     {
3639       as_bad (_("bad personality routine number"));
3640       ignore_rest_of_line ();
3641       return;
3642     }
3643 
3644   unwind.personality_index = exp.X_add_number;
3645 
3646   demand_empty_rest_of_line ();
3647 }
3648 
3649 
3650 /* Parse a personality directive.  */
3651 
3652 static void
3653 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3654 {
3655   char *name, *p, c;
3656 
3657   if (!unwind.proc_start)
3658     as_bad (MISSING_FNSTART);
3659 
3660   if (unwind.personality_routine || unwind.personality_index != -1)
3661     as_bad (_("duplicate .personality directive"));
3662 
3663   name = input_line_pointer;
3664   c = get_symbol_end ();
3665   p = input_line_pointer;
3666   unwind.personality_routine = symbol_find_or_make (name);
3667   *p = c;
3668   demand_empty_rest_of_line ();
3669 }
3670 
3671 
3672 /* Parse a directive saving core registers.  */
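/* For example, ".save {r4-r7, lr}" should produce the single short-form
   opcode 0xab (pop r4-r7 and r14), ".save {r0-r3}" the pair 0xb1 0x0f
   (pop under mask r0-r3), and a non-contiguous list such as
   ".save {r4, r7}" falls back to the long form 0x80 0x09.  These values
   follow from the encodings computed below (illustrative only).  */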
3673 
3674 static void
3675 s_arm_unwind_save_core (void)
3676 {
3677   valueT op;
3678   long range;
3679   int n;
3680 
3681   range = parse_reg_list (&input_line_pointer);
3682   if (range == FAIL)
3683     {
3684       as_bad (_("expected register list"));
3685       ignore_rest_of_line ();
3686       return;
3687     }
3688 
3689   demand_empty_rest_of_line ();
3690 
3691   /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3692      into .unwind_save {..., sp...}.  We aren't bothered about the value of
3693      ip because it is clobbered by calls.  */
3694   if (unwind.sp_restored && unwind.fp_reg == 12
3695       && (range & 0x3000) == 0x1000)
3696     {
3697       unwind.opcode_count--;
3698       unwind.sp_restored = 0;
3699       range = (range | 0x2000) & ~0x1000;
3700       unwind.pending_offset = 0;
3701     }
3702 
3703   /* Pop r4-r15.  */
3704   if (range & 0xfff0)
3705     {
3706       /* See if we can use the short opcodes.  These pop a block of up to 8
3707 	 registers starting with r4, plus maybe r14.  */
3708       for (n = 0; n < 8; n++)
3709 	{
3710 	  /* Break at the first non-saved register.	 */
3711 	  if ((range & (1 << (n + 4))) == 0)
3712 	    break;
3713 	}
3714       /* See if there are any other bits set.  */
3715       if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3716 	{
3717 	  /* Use the long form.  */
3718 	  op = 0x8000 | ((range >> 4) & 0xfff);
3719 	  add_unwind_opcode (op, 2);
3720 	}
3721       else
3722 	{
3723 	  /* Use the short form.  */
3724 	  if (range & 0x4000)
3725 	    op = 0xa8; /* Pop r14.	*/
3726 	  else
3727 	    op = 0xa0; /* Do not pop r14.  */
3728 	  op |= (n - 1);
3729 	  add_unwind_opcode (op, 1);
3730 	}
3731     }
3732 
3733   /* Pop r0-r3.	 */
3734   if (range & 0xf)
3735     {
3736       op = 0xb100 | (range & 0xf);
3737       add_unwind_opcode (op, 2);
3738     }
3739 
3740   /* Record the number of bytes pushed.	 */
3741   for (n = 0; n < 16; n++)
3742     {
3743       if (range & (1 << n))
3744 	unwind.frame_size += 4;
3745     }
3746 }
3747 
3748 
3749 /* Parse a directive saving FPA registers.  */
3750 
3751 static void
3752 s_arm_unwind_save_fpa (int reg)
3753 {
3754   expressionS exp;
3755   int num_regs;
3756   valueT op;
3757 
3758   /* Get Number of registers to transfer.  */
3759   if (skip_past_comma (&input_line_pointer) != FAIL)
3760     expression (&exp);
3761   else
3762     exp.X_op = O_illegal;
3763 
3764   if (exp.X_op != O_constant)
3765     {
3766       as_bad (_("expected , <constant>"));
3767       ignore_rest_of_line ();
3768       return;
3769     }
3770 
3771   num_regs = exp.X_add_number;
3772 
3773   if (num_regs < 1 || num_regs > 4)
3774     {
3775       as_bad (_("number of registers must be in the range [1:4]"));
3776       ignore_rest_of_line ();
3777       return;
3778     }
3779 
3780   demand_empty_rest_of_line ();
3781 
3782   if (reg == 4)
3783     {
3784       /* Short form.  */
3785       op = 0xb4 | (num_regs - 1);
3786       add_unwind_opcode (op, 1);
3787     }
3788   else
3789     {
3790       /* Long form.  */
3791       op = 0xc800 | (reg << 4) | (num_regs - 1);
3792       add_unwind_opcode (op, 2);
3793     }
3794   unwind.frame_size += num_regs * 12;
3795 }
3796 
3797 
3798 /* Parse a directive saving VFP registers for ARMv6 and above.  */
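/* For example, ".vsave {d8-d11}" should produce the opcode pair
   0xc9 0x83 (pop d8-d11, FSTMFDD/VPUSH form) and ".vsave {d16-d17}"
   the pair 0xc8 0x01 for the VFPv3 registers; both follow from the
   encodings computed below (illustrative only).  */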
3799 
3800 static void
3801 s_arm_unwind_save_vfp_armv6 (void)
3802 {
3803   int count;
3804   unsigned int start;
3805   valueT op;
3806   int num_vfpv3_regs = 0;
3807   int num_regs_below_16;
3808 
3809   count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3810   if (count == FAIL)
3811     {
3812       as_bad (_("expected register list"));
3813       ignore_rest_of_line ();
3814       return;
3815     }
3816 
3817   demand_empty_rest_of_line ();
3818 
3819   /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3820      than FSTMX/FLDMX-style ones).  */
3821 
3822   /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
3823   if (start >= 16)
3824     num_vfpv3_regs = count;
3825   else if (start + count > 16)
3826     num_vfpv3_regs = start + count - 16;
3827 
3828   if (num_vfpv3_regs > 0)
3829     {
3830       int start_offset = start > 16 ? start - 16 : 0;
3831       op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3832       add_unwind_opcode (op, 2);
3833     }
3834 
3835   /* Generate opcode for registers numbered in the range 0 .. 15.  */
3836   num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3837   gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3838   if (num_regs_below_16 > 0)
3839     {
3840       op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3841       add_unwind_opcode (op, 2);
3842     }
3843 
3844   unwind.frame_size += count * 8;
3845 }
3846 
3847 
3848 /* Parse a directive saving VFP registers for pre-ARMv6.  */
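/* For example, ".save {d8-d10}" should produce the single short-form
   opcode 0xba (pop d8-d10, FSTMFDX form); the extra four bytes added
   to frame_size below account for the FSTMX format word
   (illustrative only).  */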
3849 
3850 static void
3851 s_arm_unwind_save_vfp (void)
3852 {
3853   int count;
3854   unsigned int reg;
3855   valueT op;
3856 
3857   count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3858   if (count == FAIL)
3859     {
3860       as_bad (_("expected register list"));
3861       ignore_rest_of_line ();
3862       return;
3863     }
3864 
3865   demand_empty_rest_of_line ();
3866 
3867   if (reg == 8)
3868     {
3869       /* Short form.  */
3870       op = 0xb8 | (count - 1);
3871       add_unwind_opcode (op, 1);
3872     }
3873   else
3874     {
3875       /* Long form.  */
3876       op = 0xb300 | (reg << 4) | (count - 1);
3877       add_unwind_opcode (op, 2);
3878     }
3879   unwind.frame_size += count * 8 + 4;
3880 }
3881 
3882 
3883 /* Parse a directive saving iWMMXt data registers.  */
3884 
3885 static void
3886 s_arm_unwind_save_mmxwr (void)
3887 {
3888   int reg;
3889   int hi_reg;
3890   int i;
3891   unsigned mask = 0;
3892   valueT op;
3893 
3894   if (*input_line_pointer == '{')
3895     input_line_pointer++;
3896 
3897   do
3898     {
3899       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3900 
3901       if (reg == FAIL)
3902 	{
3903 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3904 	  goto error;
3905 	}
3906 
3907       if (mask >> reg)
3908 	as_tsktsk (_("register list not in ascending order"));
3909       mask |= 1 << reg;
3910 
3911       if (*input_line_pointer == '-')
3912 	{
3913 	  input_line_pointer++;
3914 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3915 	  if (hi_reg == FAIL)
3916 	    {
3917 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3918 	      goto error;
3919 	    }
3920 	  else if (reg >= hi_reg)
3921 	    {
3922 	      as_bad (_("bad register range"));
3923 	      goto error;
3924 	    }
3925 	  for (; reg < hi_reg; reg++)
3926 	    mask |= 1 << reg;
3927 	}
3928     }
3929   while (skip_past_comma (&input_line_pointer) != FAIL);
3930 
3931   if (*input_line_pointer == '}')
3932     input_line_pointer++;
3933 
3934   demand_empty_rest_of_line ();
3935 
3936   /* Generate any deferred opcodes because we're going to be looking at
3937      the list.	*/
3938   flush_pending_unwind ();
3939 
3940   for (i = 0; i < 16; i++)
3941     {
3942       if (mask & (1 << i))
3943 	unwind.frame_size += 8;
3944     }
3945 
3946   /* Attempt to combine with a previous opcode.	 We do this because gcc
3947      likes to output separate unwind directives for a single block of
3948      registers.	 */
3949   if (unwind.opcode_count > 0)
3950     {
3951       i = unwind.opcodes[unwind.opcode_count - 1];
3952       if ((i & 0xf8) == 0xc0)
3953 	{
3954 	  i &= 7;
3955 	  /* Only merge if the blocks are contiguous.  */
3956 	  if (i < 6)
3957 	    {
3958 	      if ((mask & 0xfe00) == (1 << 9))
3959 		{
3960 		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3961 		  unwind.opcode_count--;
3962 		}
3963 	    }
3964 	  else if (i == 6 && unwind.opcode_count >= 2)
3965 	    {
3966 	      i = unwind.opcodes[unwind.opcode_count - 2];
3967 	      reg = i >> 4;
3968 	      i &= 0xf;
3969 
3970 	      /* Guard the shift: reg == 0 would shift by a negative amount.  */
3971 	      if (reg > 0
3972 		  && ((mask & (0xffffu << (reg - 1))) == (1u << (reg - 1))))
3973 		{
3974 		  op = (1 << (reg + i + 1)) - 1;
3975 		  op &= ~((1 << reg) - 1);
3976 		  mask |= op;
3977 		  unwind.opcode_count -= 2;
3978 		}
3979 	    }
3980 	}
3981     }
3982 
3983   hi_reg = 15;
3984   /* We want to generate opcodes in the order the registers have been
3985      saved, i.e. descending order.  */
3986   for (reg = 15; reg >= -1; reg--)
3987     {
3988       /* Save registers in blocks.  */
3989       if (reg < 0
3990 	  || !(mask & (1 << reg)))
3991 	{
3992 	  /* We found an unsaved reg.  Generate opcodes to save the
3993 	     preceding block.	*/
3994 	  if (reg != hi_reg)
3995 	    {
3996 	      if (reg == 9)
3997 		{
3998 		  /* Short form.  */
3999 		  op = 0xc0 | (hi_reg - 10);
4000 		  add_unwind_opcode (op, 1);
4001 		}
4002 	      else
4003 		{
4004 		  /* Long form.	 */
4005 		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4006 		  add_unwind_opcode (op, 2);
4007 		}
4008 	    }
4009 	  hi_reg = reg - 1;
4010 	}
4011     }
4012 
4013   return;
4014 error:
4015   ignore_rest_of_line ();
4016 }
4017 
4018 static void
4019 s_arm_unwind_save_mmxwcg (void)
4020 {
4021   int reg;
4022   int hi_reg;
4023   unsigned mask = 0;
4024   valueT op;
4025 
4026   if (*input_line_pointer == '{')
4027     input_line_pointer++;
4028 
4029   do
4030     {
4031       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4032 
4033       if (reg == FAIL)
4034 	{
4035 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4036 	  goto error;
4037 	}
4038 
4039       reg -= 8;
4040       if (mask >> reg)
4041 	as_tsktsk (_("register list not in ascending order"));
4042       mask |= 1 << reg;
4043 
4044       if (*input_line_pointer == '-')
4045 	{
4046 	  input_line_pointer++;
4047 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4048 	  if (hi_reg == FAIL)
4049 	    {
4050 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4051 	      goto error;
4052 	    }
4053 	  else if (reg >= hi_reg)
4054 	    {
4055 	      as_bad (_("bad register range"));
4056 	      goto error;
4057 	    }
4058 	  for (; reg < hi_reg; reg++)
4059 	    mask |= 1 << reg;
4060 	}
4061     }
4062   while (skip_past_comma (&input_line_pointer) != FAIL);
4063 
4064   if (*input_line_pointer == '}')
4065     input_line_pointer++;
4066 
4067   demand_empty_rest_of_line ();
4068 
4069   /* Generate any deferred opcodes because we're going to be looking at
4070      the list.	*/
4071   flush_pending_unwind ();
4072 
4073   for (reg = 0; reg < 16; reg++)
4074     {
4075       if (mask & (1 << reg))
4076 	unwind.frame_size += 4;
4077     }
4078   op = 0xc700 | mask;
4079   add_unwind_opcode (op, 2);
4080   return;
4081 error:
4082   ignore_rest_of_line ();
4083 }
4084 
4085 
4086 /* Parse an unwind_save directive.
4087    If the argument is non-zero, this is a .vsave directive.  */
4088 
4089 static void
4090 s_arm_unwind_save (int arch_v6)
4091 {
4092   char *peek;
4093   struct reg_entry *reg;
4094   bfd_boolean had_brace = FALSE;
4095 
4096   if (!unwind.proc_start)
4097     as_bad (MISSING_FNSTART);
4098 
4099   /* Figure out what sort of save we have.  */
4100   peek = input_line_pointer;
4101 
4102   if (*peek == '{')
4103     {
4104       had_brace = TRUE;
4105       peek++;
4106     }
4107 
4108   reg = arm_reg_parse_multi (&peek);
4109 
4110   if (!reg)
4111     {
4112       as_bad (_("register expected"));
4113       ignore_rest_of_line ();
4114       return;
4115     }
4116 
4117   switch (reg->type)
4118     {
4119     case REG_TYPE_FN:
4120       if (had_brace)
4121 	{
4122 	  as_bad (_("FPA .unwind_save does not take a register list"));
4123 	  ignore_rest_of_line ();
4124 	  return;
4125 	}
4126       input_line_pointer = peek;
4127       s_arm_unwind_save_fpa (reg->number);
4128       return;
4129 
4130     case REG_TYPE_RN:	  s_arm_unwind_save_core ();   return;
4131     case REG_TYPE_VFD:
4132       if (arch_v6)
4133         s_arm_unwind_save_vfp_armv6 ();
4134       else
4135         s_arm_unwind_save_vfp ();
4136       return;
4137     case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
4138     case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4139 
4140     default:
4141       as_bad (_(".unwind_save does not support this kind of register"));
4142       ignore_rest_of_line ();
4143     }
4144 }
4145 
4146 
4147 /* Parse an unwind_movsp directive.  */
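/* For example, ".movsp ip" should produce the opcode 0x9c (set sp from
   r12) and record fp_reg/fp_offset so that a following register save
   can be rewritten in terms of sp (see s_arm_unwind_save_core);
   illustrative only.  */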
4148 
4149 static void
4150 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4151 {
4152   int reg;
4153   valueT op;
4154   int offset;
4155 
4156   if (!unwind.proc_start)
4157     as_bad (MISSING_FNSTART);
4158 
4159   reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4160   if (reg == FAIL)
4161     {
4162       as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4163       ignore_rest_of_line ();
4164       return;
4165     }
4166 
4167   /* Optional constant.	 */
4168   if (skip_past_comma (&input_line_pointer) != FAIL)
4169     {
4170       if (immediate_for_directive (&offset) == FAIL)
4171 	return;
4172     }
4173   else
4174     offset = 0;
4175 
4176   demand_empty_rest_of_line ();
4177 
4178   if (reg == REG_SP || reg == REG_PC)
4179     {
4180       as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4181       return;
4182     }
4183 
4184   if (unwind.fp_reg != REG_SP)
4185     as_bad (_("unexpected .unwind_movsp directive"));
4186 
4187   /* Generate opcode to restore the value.  */
4188   op = 0x90 | reg;
4189   add_unwind_opcode (op, 1);
4190 
4191   /* Record the information for later.	*/
4192   unwind.fp_reg = reg;
4193   unwind.fp_offset = unwind.frame_size - offset;
4194   unwind.sp_restored = 1;
4195 }
4196 
4197 /* Parse an unwind_pad directive.  */
4198 
4199 static void
4200 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4201 {
4202   int offset;
4203 
4204   if (!unwind.proc_start)
4205     as_bad (MISSING_FNSTART);
4206 
4207   if (immediate_for_directive (&offset) == FAIL)
4208     return;
4209 
4210   if (offset & 3)
4211     {
4212       as_bad (_("stack increment must be multiple of 4"));
4213       ignore_rest_of_line ();
4214       return;
4215     }
4216 
4217   /* Don't generate any opcodes, just record the details for later.  */
4218   unwind.frame_size += offset;
4219   unwind.pending_offset += offset;
4220 
4221   demand_empty_rest_of_line ();
4222 }
4223 
4224 /* Parse an unwind_setfp directive.  */
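/* For example, ".setfp fp, sp, #8" records fp_reg = r11 and
   fp_offset = frame_size - 8; no unwind opcode is emitted here, the
   recorded values are only consumed later when the unwind opcodes for
   the function are generated.  */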
4225 
4226 static void
4227 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4228 {
4229   int sp_reg;
4230   int fp_reg;
4231   int offset;
4232 
4233   if (!unwind.proc_start)
4234     as_bad (MISSING_FNSTART);
4235 
4236   fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4237   if (skip_past_comma (&input_line_pointer) == FAIL)
4238     sp_reg = FAIL;
4239   else
4240     sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4241 
4242   if (fp_reg == FAIL || sp_reg == FAIL)
4243     {
4244       as_bad (_("expected <reg>, <reg>"));
4245       ignore_rest_of_line ();
4246       return;
4247     }
4248 
4249   /* Optional constant.	 */
4250   if (skip_past_comma (&input_line_pointer) != FAIL)
4251     {
4252       if (immediate_for_directive (&offset) == FAIL)
4253 	return;
4254     }
4255   else
4256     offset = 0;
4257 
4258   demand_empty_rest_of_line ();
4259 
4260   if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4261     {
4262 	      as_bad (_("register must be either sp or set by a previous "
4263 		"unwind_movsp directive"));
4264       return;
4265     }
4266 
4267   /* Don't generate any opcodes, just record the information for later.	 */
4268   unwind.fp_reg = fp_reg;
4269   unwind.fp_used = 1;
4270   if (sp_reg == REG_SP)
4271     unwind.fp_offset = unwind.frame_size - offset;
4272   else
4273     unwind.fp_offset -= offset;
4274 }
4275 
4276 /* Parse an unwind_raw directive.  */
4277 
4278 static void
4279 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4280 {
4281   expressionS exp;
4282   /* This is an arbitrary limit.	 */
4283   unsigned char op[16];
4284   int count;
4285 
4286   if (!unwind.proc_start)
4287     as_bad (MISSING_FNSTART);
4288 
4289   expression (&exp);
4290   if (exp.X_op == O_constant
4291       && skip_past_comma (&input_line_pointer) != FAIL)
4292     {
4293       unwind.frame_size += exp.X_add_number;
4294       expression (&exp);
4295     }
4296   else
4297     exp.X_op = O_illegal;
4298 
4299   if (exp.X_op != O_constant)
4300     {
4301       as_bad (_("expected <offset>, <opcode>"));
4302       ignore_rest_of_line ();
4303       return;
4304     }
4305 
4306   count = 0;
4307 
4308   /* Parse the opcode.	*/
4309   for (;;)
4310     {
4311       if (count >= 16)
4312 	{
4313 	  as_bad (_("unwind opcode too long"));
4314 	  ignore_rest_of_line ();
	  return;
4315 	}
4316       if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4317 	{
4318 	  as_bad (_("invalid unwind opcode"));
4319 	  ignore_rest_of_line ();
4320 	  return;
4321 	}
4322       op[count++] = exp.X_add_number;
4323 
4324       /* Parse the next byte.  */
4325       if (skip_past_comma (&input_line_pointer) == FAIL)
4326 	break;
4327 
4328       expression (&exp);
4329     }
4330 
4331   /* Add the opcode bytes in reverse order.  */
4332   while (count--)
4333     add_unwind_opcode (op[count], 1);
4334 
4335   demand_empty_rest_of_line ();
4336 }
4337 
4338 
4339 /* Parse a .eabi_attribute directive.  */
4340 
4341 static void
4342 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4343 {
4344   int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4345 
4346   if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4347     attributes_set_explicitly[tag] = 1;
4348 }
4349 
4350 /* Emit a tls fix for the symbol.  */
4351 
4352 static void
4353 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4354 {
4355   char *p;
4356   expressionS exp;
4357 #ifdef md_flush_pending_output
4358   md_flush_pending_output ();
4359 #endif
4360 
4361 #ifdef md_cons_align
4362   md_cons_align (4);
4363 #endif
4364 
4365   /* Since we're just labelling the code, there's no need to define a
4366      mapping symbol.  */
4367   expression (&exp);
4368   p = obstack_next_free (&frchain_now->frch_obstack);
4369   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4370 	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4371 	       : BFD_RELOC_ARM_TLS_DESCSEQ);
4372 }
4373 #endif /* OBJ_ELF */
4374 
4375 static void s_arm_arch (int);
4376 static void s_arm_object_arch (int);
4377 static void s_arm_cpu (int);
4378 static void s_arm_fpu (int);
4379 static void s_arm_arch_extension (int);
4380 
4381 #ifdef TE_PE
4382 
4383 static void
4384 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4385 {
4386   expressionS exp;
4387 
4388   do
4389     {
4390       expression (&exp);
4391       if (exp.X_op == O_symbol)
4392 	exp.X_op = O_secrel;
4393 
4394       emit_expr (&exp, 4);
4395     }
4396   while (*input_line_pointer++ == ',');
4397 
4398   input_line_pointer--;
4399   demand_empty_rest_of_line ();
4400 }
4401 #endif /* TE_PE */
4402 
4403 /* This table describes all the machine specific pseudo-ops the assembler
4404    has to support.  The fields are:
4405      pseudo-op name without dot
4406      function to call to execute this pseudo-op
4407      Integer arg to pass to the function.  */
4408 
4409 const pseudo_typeS md_pseudo_table[] =
4410 {
4411   /* Never called because '.req' does not start a line.	 */
4412   { "req",	   s_req,	  0 },
4413   /* Following two are likewise never called.  */
4414   { "dn",	   s_dn,          0 },
4415   { "qn",          s_qn,          0 },
4416   { "unreq",	   s_unreq,	  0 },
4417   { "bss",	   s_bss,	  0 },
4418   { "align",	   s_align,	  0 },
4419   { "arm",	   s_arm,	  0 },
4420   { "thumb",	   s_thumb,	  0 },
4421   { "code",	   s_code,	  0 },
4422   { "force_thumb", s_force_thumb, 0 },
4423   { "thumb_func",  s_thumb_func,  0 },
4424   { "thumb_set",   s_thumb_set,	  0 },
4425   { "even",	   s_even,	  0 },
4426   { "ltorg",	   s_ltorg,	  0 },
4427   { "pool",	   s_ltorg,	  0 },
4428   { "syntax",	   s_syntax,	  0 },
4429   { "cpu",	   s_arm_cpu,	  0 },
4430   { "arch",	   s_arm_arch,	  0 },
4431   { "object_arch", s_arm_object_arch,	0 },
4432   { "fpu",	   s_arm_fpu,	  0 },
4433   { "arch_extension", s_arm_arch_extension, 0 },
4434 #ifdef OBJ_ELF
4435   { "word",	        s_arm_elf_cons, 4 },
4436   { "long",	        s_arm_elf_cons, 4 },
4437   { "inst.n",           s_arm_elf_inst, 2 },
4438   { "inst.w",           s_arm_elf_inst, 4 },
4439   { "inst",             s_arm_elf_inst, 0 },
4440   { "rel31",	        s_arm_rel31,	  0 },
4441   { "fnstart",		s_arm_unwind_fnstart,	0 },
4442   { "fnend",		s_arm_unwind_fnend,	0 },
4443   { "cantunwind",	s_arm_unwind_cantunwind, 0 },
4444   { "personality",	s_arm_unwind_personality, 0 },
4445   { "personalityindex",	s_arm_unwind_personalityindex, 0 },
4446   { "handlerdata",	s_arm_unwind_handlerdata, 0 },
4447   { "save",		s_arm_unwind_save,	0 },
4448   { "vsave",		s_arm_unwind_save,	1 },
4449   { "movsp",		s_arm_unwind_movsp,	0 },
4450   { "pad",		s_arm_unwind_pad,	0 },
4451   { "setfp",		s_arm_unwind_setfp,	0 },
4452   { "unwind_raw",	s_arm_unwind_raw,	0 },
4453   { "eabi_attribute",	s_arm_eabi_attribute,	0 },
4454   { "tlsdescseq",	s_arm_tls_descseq,      0 },
4455 #else
4456   { "word",	   cons, 4},
4457 
4458   /* These are used for dwarf.  */
4459   {"2byte", cons, 2},
4460   {"4byte", cons, 4},
4461   {"8byte", cons, 8},
4462   /* These are used for dwarf2.  */
4463   { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4464   { "loc",  dwarf2_directive_loc,  0 },
4465   { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4466 #endif
4467   { "extend",	   float_cons, 'x' },
4468   { "ldouble",	   float_cons, 'x' },
4469   { "packed",	   float_cons, 'p' },
4470 #ifdef TE_PE
4471   {"secrel32", pe_directive_secrel, 0},
4472 #endif
4473   { 0, 0, 0 }
4474 };
4475 
4476 /* Parser functions used exclusively in instruction operands.  */
4477 
4478 /* Generic immediate-value read function for use in insn parsing.
4479    STR points to the beginning of the immediate (the leading #);
4480    VAL receives the value; if the value is outside [MIN, MAX]
4481    issue an error.  PREFIX_OPT is true if the immediate prefix is
4482    optional.  */
4483 
4484 static int
4485 parse_immediate (char **str, int *val, int min, int max,
4486 		 bfd_boolean prefix_opt)
4487 {
4488   expressionS exp;
4489   my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4490   if (exp.X_op != O_constant)
4491     {
4492       inst.error = _("constant expression required");
4493       return FAIL;
4494     }
4495 
4496   if (exp.X_add_number < min || exp.X_add_number > max)
4497     {
4498       inst.error = _("immediate value out of range");
4499       return FAIL;
4500     }
4501 
4502   *val = exp.X_add_number;
4503   return SUCCESS;
4504 }
4505 
4506 /* Less-generic immediate-value read function with the possibility of loading a
4507    big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4508    instructions. Puts the result directly in inst.operands[i].  */
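/* For example, the 64-bit immediate in "vmov.i64 d0, #0xff00ff00ff00ff00"
   ends up with the low 32 bits in inst.operands[i].imm and the high 32
   bits in inst.operands[i].reg, with regisimm set (illustrative only).  */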
4509 
4510 static int
4511 parse_big_immediate (char **str, int i)
4512 {
4513   expressionS exp;
4514   char *ptr = *str;
4515 
4516   my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4517 
4518   if (exp.X_op == O_constant)
4519     {
4520       inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4521       /* If we're on a 64-bit host, then a 64-bit number can be returned using
4522 	 O_constant.  We have to be careful not to break compilation for
4523 	 32-bit X_add_number, though.  */
4524       if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4525 	{
4526           /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
4527 	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4528 	  inst.operands[i].regisimm = 1;
4529 	}
4530     }
4531   else if (exp.X_op == O_big
4532 	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4533     {
4534       unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4535 
4536       /* Bignums have their least significant bits in
4537          generic_bignum[0]. Make sure we put 32 bits in imm and
4538          32 bits in reg,  in a (hopefully) portable way.  */
4539       gas_assert (parts != 0);
4540 
4541       /* Make sure that the number is not too big.
4542 	 PR 11972: Bignums can now be sign-extended to the
4543 	 size of a .octa so check that the out of range bits
4544 	 are all zero or all one.  */
4545       if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4546 	{
4547 	  LITTLENUM_TYPE m = -1;
4548 
4549 	  if (generic_bignum[parts * 2] != 0
4550 	      && generic_bignum[parts * 2] != m)
4551 	    return FAIL;
4552 
4553 	  for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4554 	    if (generic_bignum[j] != generic_bignum[j-1])
4555 	      return FAIL;
4556 	}
4557 
4558       inst.operands[i].imm = 0;
4559       for (j = 0; j < parts; j++, idx++)
4560         inst.operands[i].imm |= generic_bignum[idx]
4561                                 << (LITTLENUM_NUMBER_OF_BITS * j);
4562       inst.operands[i].reg = 0;
4563       for (j = 0; j < parts; j++, idx++)
4564         inst.operands[i].reg |= generic_bignum[idx]
4565                                 << (LITTLENUM_NUMBER_OF_BITS * j);
4566       inst.operands[i].regisimm = 1;
4567     }
4568   else
4569     return FAIL;
4570 
4571   *str = ptr;
4572 
4573   return SUCCESS;
4574 }
4575 
4576 /* Returns the pseudo-register number of an FPA immediate constant,
4577    or FAIL if there isn't a valid constant here.  */
4578 
4579 static int
4580 parse_fpa_immediate (char ** str)
4581 {
4582   LITTLENUM_TYPE words[MAX_LITTLENUMS];
4583   char *	 save_in;
4584   expressionS	 exp;
4585   int		 i;
4586   int		 j;
4587 
4588   /* First try to match exact strings; this guarantees that some
4589      formats will work even for cross assembly.  */
4590 
4591   for (i = 0; fp_const[i]; i++)
4592     {
4593       if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4594 	{
4595 	  char *start = *str;
4596 
4597 	  *str += strlen (fp_const[i]);
4598 	  if (is_end_of_line[(unsigned char) **str])
4599 	    return i + 8;
4600 	  *str = start;
4601 	}
4602     }
4603 
4604   /* Just because we didn't get a match doesn't mean that the constant
4605      isn't valid, just that it is in a format that we don't
4606      automatically recognize.  Try parsing it with the standard
4607      expression routines.  */
4608 
4609   memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4610 
4611   /* Look for a raw floating point number.  */
4612   if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4613       && is_end_of_line[(unsigned char) *save_in])
4614     {
4615       for (i = 0; i < NUM_FLOAT_VALS; i++)
4616 	{
4617 	  for (j = 0; j < MAX_LITTLENUMS; j++)
4618 	    {
4619 	      if (words[j] != fp_values[i][j])
4620 		break;
4621 	    }
4622 
4623 	  if (j == MAX_LITTLENUMS)
4624 	    {
4625 	      *str = save_in;
4626 	      return i + 8;
4627 	    }
4628 	}
4629     }
4630 
4631   /* Try to parse a more complex expression; this will probably fail
4632      unless the code uses a floating point prefix (e.g. "0f").  */
4633   save_in = input_line_pointer;
4634   input_line_pointer = *str;
4635   if (expression (&exp) == absolute_section
4636       && exp.X_op == O_big
4637       && exp.X_add_number < 0)
4638     {
4639       /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4640 	 Ditto for 15.	*/
4641       if (gen_to_words (words, 5, (long) 15) == 0)
4642 	{
4643 	  for (i = 0; i < NUM_FLOAT_VALS; i++)
4644 	    {
4645 	      for (j = 0; j < MAX_LITTLENUMS; j++)
4646 		{
4647 		  if (words[j] != fp_values[i][j])
4648 		    break;
4649 		}
4650 
4651 	      if (j == MAX_LITTLENUMS)
4652 		{
4653 		  *str = input_line_pointer;
4654 		  input_line_pointer = save_in;
4655 		  return i + 8;
4656 		}
4657 	    }
4658 	}
4659     }
4660 
4661   *str = input_line_pointer;
4662   input_line_pointer = save_in;
4663   inst.error = _("invalid FPA immediate expression");
4664   return FAIL;
4665 }
4666 
4667 /* Returns 1 if a number has "quarter-precision" float format
4668    0baBbbbbbc defgh000 00000000 00000000.  */
4669 
4670 static int
4671 is_quarter_float (unsigned imm)
4672 {
4673   int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4674   return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4675 }
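/* For example, 1.0f (0x3f800000) and -2.0f (0xc0000000) pass this
   test, while 0.1f (0x3dcccccd) does not: its low 19 bits are
   non-zero and its exponent is not of the required form.  */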
4676 
4677 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4678    0baBbbbbbc defgh000 00000000 00000000.
4679    The zero and minus-zero cases need special handling, since they can't be
4680    encoded in the "quarter-precision" float format, but can nonetheless be
4681    loaded as integer constants.  */
4682 
4683 static unsigned
4684 parse_qfloat_immediate (char **ccp, int *immed)
4685 {
4686   char *str = *ccp;
4687   char *fpnum;
4688   LITTLENUM_TYPE words[MAX_LITTLENUMS];
4689   int found_fpchar = 0;
4690 
4691   skip_past_char (&str, '#');
4692 
4693   /* We must not accidentally parse an integer as a floating-point number. Make
4694      sure that the value we parse is not an integer by checking for special
4695      characters '.' or 'e'.
4696      FIXME: This is a horrible hack, but doing better is tricky because type
4697      information isn't in a very usable state at parse time.  */
4698   fpnum = str;
4699   skip_whitespace (fpnum);
4700 
4701   if (strncmp (fpnum, "0x", 2) == 0)
4702     return FAIL;
4703   else
4704     {
4705       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4706         if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4707           {
4708             found_fpchar = 1;
4709             break;
4710           }
4711 
4712       if (!found_fpchar)
4713         return FAIL;
4714     }
4715 
4716   if ((str = atof_ieee (str, 's', words)) != NULL)
4717     {
4718       unsigned fpword = 0;
4719       int i;
4720 
4721       /* Our FP word must be 32 bits (single-precision FP).  */
4722       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4723         {
4724           fpword <<= LITTLENUM_NUMBER_OF_BITS;
4725           fpword |= words[i];
4726         }
4727 
4728       if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4729         *immed = fpword;
4730       else
4731         return FAIL;
4732 
4733       *ccp = str;
4734 
4735       return SUCCESS;
4736     }
4737 
4738   return FAIL;
4739 }
4740 
4741 /* Shift operands.  */
4742 enum shift_kind
4743 {
4744   SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4745 };
4746 
4747 struct asm_shift_name
4748 {
4749   const char	  *name;
4750   enum shift_kind  kind;
4751 };
4752 
4753 /* Third argument to parse_shift.  */
4754 enum parse_shift_mode
4755 {
4756   NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
4757   SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
4758   SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
4759   SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
4760   SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
4761 };
4762 
4763 /* Parse a <shift> specifier on an ARM data processing instruction.
4764    This has three forms:
4765 
4766      (LSL|LSR|ASL|ASR|ROR) Rs
4767      (LSL|LSR|ASL|ASR|ROR) #imm
4768      RRX
4769 
4770    Note that ASL is assimilated to LSL in the instruction encoding, and
4771    RRX to ROR #0 (which cannot be written as such).  */
4772 
4773 static int
4774 parse_shift (char **str, int i, enum parse_shift_mode mode)
4775 {
4776   const struct asm_shift_name *shift_name;
4777   enum shift_kind shift;
4778   char *s = *str;
4779   char *p = s;
4780   int reg;
4781 
4782   for (p = *str; ISALPHA (*p); p++)
4783     ;
4784 
4785   if (p == *str)
4786     {
4787       inst.error = _("shift expression expected");
4788       return FAIL;
4789     }
4790 
4791   shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4792                                                             p - *str);
4793 
4794   if (shift_name == NULL)
4795     {
4796       inst.error = _("shift expression expected");
4797       return FAIL;
4798     }
4799 
4800   shift = shift_name->kind;
4801 
4802   switch (mode)
4803     {
4804     case NO_SHIFT_RESTRICT:
4805     case SHIFT_IMMEDIATE:   break;
4806 
4807     case SHIFT_LSL_OR_ASR_IMMEDIATE:
4808       if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4809 	{
4810 	  inst.error = _("'LSL' or 'ASR' required");
4811 	  return FAIL;
4812 	}
4813       break;
4814 
4815     case SHIFT_LSL_IMMEDIATE:
4816       if (shift != SHIFT_LSL)
4817 	{
4818 	  inst.error = _("'LSL' required");
4819 	  return FAIL;
4820 	}
4821       break;
4822 
4823     case SHIFT_ASR_IMMEDIATE:
4824       if (shift != SHIFT_ASR)
4825 	{
4826 	  inst.error = _("'ASR' required");
4827 	  return FAIL;
4828 	}
4829       break;
4830 
4831     default: abort ();
4832     }
4833 
4834   if (shift != SHIFT_RRX)
4835     {
4836       /* Whitespace can appear here if the next thing is a bare digit.	*/
4837       skip_whitespace (p);
4838 
4839       if (mode == NO_SHIFT_RESTRICT
4840 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4841 	{
4842 	  inst.operands[i].imm = reg;
4843 	  inst.operands[i].immisreg = 1;
4844 	}
4845       else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4846 	return FAIL;
4847     }
4848   inst.operands[i].shift_kind = shift;
4849   inst.operands[i].shifted = 1;
4850   *str = p;
4851   return SUCCESS;
4852 }
4853 
4854 /* Parse a <shifter_operand> for an ARM data processing instruction:
4855 
4856       #<immediate>
4857       #<immediate>, <rotate>
4858       <Rm>
4859       <Rm>, <shift>
4860 
4861    where <shift> is defined by parse_shift above, and <rotate> is a
4862    multiple of 2 between 0 and 30.  Validation of immediate operands
4863    is deferred to md_apply_fix.  */
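/* For example, "#255, 8" is the 8-bit constant 255 rotated right by 8,
   i.e. 0xff000000; the rotation amount is folded into bits 8-11 of the
   encoded immediate below (value << 7 places value/2 in that field).  */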
4864 
4865 static int
4866 parse_shifter_operand (char **str, int i)
4867 {
4868   int value;
4869   expressionS exp;
4870 
4871   if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4872     {
4873       inst.operands[i].reg = value;
4874       inst.operands[i].isreg = 1;
4875 
4876       /* parse_shift will override this if appropriate */
4877       inst.reloc.exp.X_op = O_constant;
4878       inst.reloc.exp.X_add_number = 0;
4879 
4880       if (skip_past_comma (str) == FAIL)
4881 	return SUCCESS;
4882 
4883       /* Shift operation on register.  */
4884       return parse_shift (str, i, NO_SHIFT_RESTRICT);
4885     }
4886 
4887   if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4888     return FAIL;
4889 
4890   if (skip_past_comma (str) == SUCCESS)
4891     {
4892       /* #x, y -- i.e. explicit rotation by y.  */
4893       if (my_get_expression (&exp, str, GE_NO_PREFIX))
4894 	return FAIL;
4895 
4896       if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4897 	{
4898 	  inst.error = _("constant expression expected");
4899 	  return FAIL;
4900 	}
4901 
4902       value = exp.X_add_number;
4903       if (value < 0 || value > 30 || value % 2 != 0)
4904 	{
4905 	  inst.error = _("invalid rotation");
4906 	  return FAIL;
4907 	}
4908       if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4909 	{
4910 	  inst.error = _("invalid constant");
4911 	  return FAIL;
4912 	}
4913 
4914       /* Encode as specified.  */
4915       inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4916       return SUCCESS;
4917     }
4918 
4919   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4920   inst.reloc.pc_rel = 0;
4921   return SUCCESS;
4922 }
4923 
4924 /* Group relocation information.  Each entry in the table contains the
4925    textual name of the relocation as may appear in assembler source
4926    and must end with a colon.
4927    Along with this textual name are the relocation codes to be used if
4928    the corresponding instruction is an ALU instruction (ADD or SUB only),
4929    an LDR, an LDRS, or an LDC.  */
4930 
4931 struct group_reloc_table_entry
4932 {
4933   const char *name;
4934   int alu_code;
4935   int ldr_code;
4936   int ldrs_code;
4937   int ldc_code;
4938 };
4939 
4940 typedef enum
4941 {
4942   /* Varieties of non-ALU group relocation.  */
4943 
4944   GROUP_LDR,
4945   GROUP_LDRS,
4946   GROUP_LDC
4947 } group_reloc_type;
4948 
4949 static struct group_reloc_table_entry group_reloc_table[] =
4950   { /* Program counter relative: */
4951     { "pc_g0_nc",
4952       BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
4953       0,				/* LDR */
4954       0,				/* LDRS */
4955       0 },				/* LDC */
4956     { "pc_g0",
4957       BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
4958       BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
4959       BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
4960       BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
4961     { "pc_g1_nc",
4962       BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
4963       0,				/* LDR */
4964       0,				/* LDRS */
4965       0 },				/* LDC */
4966     { "pc_g1",
4967       BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
4968       BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
4969       BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
4970       BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
4971     { "pc_g2",
4972       BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
4973       BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
4974       BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
4975       BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
4976     /* Section base relative */
4977     { "sb_g0_nc",
4978       BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
4979       0,				/* LDR */
4980       0,				/* LDRS */
4981       0 },				/* LDC */
4982     { "sb_g0",
4983       BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
4984       BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
4985       BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
4986       BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
4987     { "sb_g1_nc",
4988       BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
4989       0,				/* LDR */
4990       0,				/* LDRS */
4991       0 },				/* LDC */
4992     { "sb_g1",
4993       BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
4994       BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
4995       BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
4996       BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
4997     { "sb_g2",
4998       BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
4999       BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
5000       BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
5001       BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
5002 
5003 /* Given the address of a pointer pointing to the textual name of a group
5004    relocation as may appear in assembler source, attempt to find its details
5005    in group_reloc_table.  The pointer will be updated to the character after
5006    the trailing colon.  On failure, FAIL will be returned; SUCCESS
5007    otherwise.  On success, *OUT will be updated to point at the relevant
5008    group_reloc_table entry.  */
5009 
5010 static int
5011 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5012 {
5013   unsigned int i;
5014   for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5015     {
5016       int length = strlen (group_reloc_table[i].name);
5017 
5018       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5019 	  && (*str)[length] == ':')
5020         {
5021           *out = &group_reloc_table[i];
5022           *str += (length + 1);
5023           return SUCCESS;
5024         }
5025     }
5026 
5027   return FAIL;
5028 }
5029 
5030 /* Parse a <shifter_operand> for an ARM data processing instruction
5031    (as for parse_shifter_operand) where group relocations are allowed:
5032 
5033       #<immediate>
5034       #<immediate>, <rotate>
5035       #:<group_reloc>:<expression>
5036       <Rm>
5037       <Rm>, <shift>
5038 
5039    where <group_reloc> is one of the strings defined in group_reloc_table.
5040    The hashes are optional.
5041 
5042    Everything else is as for parse_shifter_operand.  */
5043 
5044 static parse_operand_result
5045 parse_shifter_operand_group_reloc (char **str, int i)
5046 {
5047   /* Determine if we have the sequence of characters #: or just :
5048      coming next.  If we do, then we check for a group relocation.
5049      If we don't, punt the whole lot to parse_shifter_operand.  */
5050 
5051   if (((*str)[0] == '#' && (*str)[1] == ':')
5052       || (*str)[0] == ':')
5053     {
5054       struct group_reloc_table_entry *entry;
5055 
5056       if ((*str)[0] == '#')
5057         (*str) += 2;
5058       else
5059         (*str)++;
5060 
5061       /* Try to parse a group relocation.  Anything else is an error.  */
5062       if (find_group_reloc_table_entry (str, &entry) == FAIL)
5063         {
5064           inst.error = _("unknown group relocation");
5065           return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5066         }
5067 
5068       /* We now have the group relocation table entry corresponding to
5069          the name in the assembler source.  Next, we parse the expression.  */
5070       if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5071         return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5072 
5073       /* Record the relocation type (always the ALU variant here).  */
5074       inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5075       gas_assert (inst.reloc.type != 0);
5076 
5077       return PARSE_OPERAND_SUCCESS;
5078     }
5079   else
5080     return parse_shifter_operand (str, i) == SUCCESS
5081            ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5082 
5083   /* Never reached.  */
5084 }
5085 
5086 /* Parse a Neon alignment expression.  Information is written to
5087    inst.operands[i].  We assume the initial ':' has been skipped.
5088 
5089    align	.imm = align << 8, .immisalign=1, .preind=0  */
5090 static parse_operand_result
5091 parse_neon_alignment (char **str, int i)
5092 {
5093   char *p = *str;
5094   expressionS exp;
5095 
5096   my_get_expression (&exp, &p, GE_NO_PREFIX);
5097 
5098   if (exp.X_op != O_constant)
5099     {
5100       inst.error = _("alignment must be constant");
5101       return PARSE_OPERAND_FAIL;
5102     }
5103 
5104   inst.operands[i].imm = exp.X_add_number << 8;
5105   inst.operands[i].immisalign = 1;
5106   /* Alignments are not pre-indexes.  */
5107   inst.operands[i].preind = 0;
5108 
5109   *str = p;
5110   return PARSE_OPERAND_SUCCESS;
5111 }
5112 
5113 /* Parse all forms of an ARM address expression.  Information is written
5114    to inst.operands[i] and/or inst.reloc.
5115 
5116    Preindexed addressing (.preind=1):
5117 
5118    [Rn, #offset]       .reg=Rn .reloc.exp=offset
5119    [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5120    [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5121 		       .shift_kind=shift .reloc.exp=shift_imm
5122 
5123    These three may have a trailing ! which causes .writeback to be set also.
5124 
5125    Postindexed addressing (.postind=1, .writeback=1):
5126 
5127    [Rn], #offset       .reg=Rn .reloc.exp=offset
5128    [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5129    [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5130 		       .shift_kind=shift .reloc.exp=shift_imm
5131 
5132    Unindexed addressing (.preind=0, .postind=0):
5133 
5134    [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5135 
5136    Other:
5137 
5138    [Rn]{!}	       shorthand for [Rn,#0]{!}
5139    =immediate	       .isreg=0 .reloc.exp=immediate
5140    label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5141 
5142   It is the caller's responsibility to check for addressing modes not
5143   supported by the instruction, and to set inst.reloc.type.  */
5144 
5145 static parse_operand_result
5146 parse_address_main (char **str, int i, int group_relocations,
5147                     group_reloc_type group_type)
5148 {
5149   char *p = *str;
5150   int reg;
5151 
5152   if (skip_past_char (&p, '[') == FAIL)
5153     {
5154       if (skip_past_char (&p, '=') == FAIL)
5155 	{
5156 	  /* Bare address - translate to PC-relative offset.  */
5157 	  inst.reloc.pc_rel = 1;
5158 	  inst.operands[i].reg = REG_PC;
5159 	  inst.operands[i].isreg = 1;
5160 	  inst.operands[i].preind = 1;
5161 	}
5162       /* Otherwise a load-constant pseudo op, no special treatment needed here.  */
5163 
5164       if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5165 	return PARSE_OPERAND_FAIL;
5166 
5167       *str = p;
5168       return PARSE_OPERAND_SUCCESS;
5169     }
5170 
5171   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5172     {
5173       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5174       return PARSE_OPERAND_FAIL;
5175     }
5176   inst.operands[i].reg = reg;
5177   inst.operands[i].isreg = 1;
5178 
5179   if (skip_past_comma (&p) == SUCCESS)
5180     {
5181       inst.operands[i].preind = 1;
5182 
5183       if (*p == '+') p++;
5184       else if (*p == '-') p++, inst.operands[i].negative = 1;
5185 
5186       if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5187 	{
5188 	  inst.operands[i].imm = reg;
5189 	  inst.operands[i].immisreg = 1;
5190 
5191 	  if (skip_past_comma (&p) == SUCCESS)
5192 	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5193 	      return PARSE_OPERAND_FAIL;
5194 	}
5195       else if (skip_past_char (&p, ':') == SUCCESS)
5196 	{
5197 	  /* FIXME: '@' should be used here, but it's filtered out by generic
5198 	     code before we get to see it here. This may be subject to
5199 	     change.  */
5200 	  parse_operand_result result = parse_neon_alignment (&p, i);
5201 
5202 	  if (result != PARSE_OPERAND_SUCCESS)
5203 	    return result;
5204 	}
5205       else
5206 	{
5207 	  if (inst.operands[i].negative)
5208 	    {
5209 	      inst.operands[i].negative = 0;
5210 	      p--;
5211 	    }
5212 
5213 	  if (group_relocations
5214 	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5215 	    {
5216 	      struct group_reloc_table_entry *entry;
5217 
5218               /* Skip over the #: or : sequence.  */
5219               if (*p == '#')
5220                 p += 2;
5221               else
5222                 p++;
5223 
5224 	      /* Try to parse a group relocation.  Anything else is an
5225                  error.  */
5226 	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5227 		{
5228 		  inst.error = _("unknown group relocation");
5229 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5230 		}
5231 
5232 	      /* We now have the group relocation table entry corresponding to
5233 		 the name in the assembler source.  Next, we parse the
5234                  expression.  */
5235 	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5236 		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5237 
5238 	      /* Record the relocation type.  */
5239               switch (group_type)
5240                 {
5241                   case GROUP_LDR:
5242 	            inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5243                     break;
5244 
5245                   case GROUP_LDRS:
5246 	            inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5247                     break;
5248 
5249                   case GROUP_LDC:
5250 	            inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5251                     break;
5252 
5253                   default:
5254                     gas_assert (0);
5255                 }
5256 
5257               if (inst.reloc.type == 0)
5258 		{
5259 		  inst.error = _("this group relocation is not allowed on this instruction");
5260 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5261 		}
5262             }
5263           else
5264 	    {
5265 	      char *q = p;
5266 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5267 		return PARSE_OPERAND_FAIL;
5268 	      /* If the offset is 0, find out if it's a +0 or -0.  */
5269 	      if (inst.reloc.exp.X_op == O_constant
5270 		  && inst.reloc.exp.X_add_number == 0)
5271 		{
5272 		  skip_whitespace (q);
5273 		  if (*q == '#')
5274 		    {
5275 		      q++;
5276 		      skip_whitespace (q);
5277 		    }
5278 		  if (*q == '-')
5279 		    inst.operands[i].negative = 1;
5280 		}
5281 	    }
5282 	}
5283     }
5284   else if (skip_past_char (&p, ':') == SUCCESS)
5285     {
5286       /* FIXME: '@' should be used here, but it's filtered out by generic code
5287 	 before we get to see it here. This may be subject to change.  */
5288       parse_operand_result result = parse_neon_alignment (&p, i);
5289 
5290       if (result != PARSE_OPERAND_SUCCESS)
5291 	return result;
5292     }
5293 
5294   if (skip_past_char (&p, ']') == FAIL)
5295     {
5296       inst.error = _("']' expected");
5297       return PARSE_OPERAND_FAIL;
5298     }
5299 
5300   if (skip_past_char (&p, '!') == SUCCESS)
5301     inst.operands[i].writeback = 1;
5302 
5303   else if (skip_past_comma (&p) == SUCCESS)
5304     {
5305       if (skip_past_char (&p, '{') == SUCCESS)
5306 	{
5307 	  /* [Rn], {expr} - unindexed, with option */
5308 	  if (parse_immediate (&p, &inst.operands[i].imm,
5309 			       0, 255, TRUE) == FAIL)
5310 	    return PARSE_OPERAND_FAIL;
5311 
5312 	  if (skip_past_char (&p, '}') == FAIL)
5313 	    {
5314 	      inst.error = _("'}' expected at end of 'option' field");
5315 	      return PARSE_OPERAND_FAIL;
5316 	    }
5317 	  if (inst.operands[i].preind)
5318 	    {
5319 	      inst.error = _("cannot combine index with option");
5320 	      return PARSE_OPERAND_FAIL;
5321 	    }
5322 	  *str = p;
5323 	  return PARSE_OPERAND_SUCCESS;
5324 	}
5325       else
5326 	{
5327 	  inst.operands[i].postind = 1;
5328 	  inst.operands[i].writeback = 1;
5329 
5330 	  if (inst.operands[i].preind)
5331 	    {
5332 	      inst.error = _("cannot combine pre- and post-indexing");
5333 	      return PARSE_OPERAND_FAIL;
5334 	    }
5335 
5336 	  if (*p == '+') p++;
5337 	  else if (*p == '-') p++, inst.operands[i].negative = 1;
5338 
5339 	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5340 	    {
5341               /* We might be using the immediate for alignment already. If we
5342                  are, OR the register number into the low-order bits.  */
5343               if (inst.operands[i].immisalign)
5344 	        inst.operands[i].imm |= reg;
5345               else
5346                 inst.operands[i].imm = reg;
5347 	      inst.operands[i].immisreg = 1;
5348 
5349 	      if (skip_past_comma (&p) == SUCCESS)
5350 		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5351 		  return PARSE_OPERAND_FAIL;
5352 	    }
5353 	  else
5354 	    {
5355 	      char *q = p;
5356 	      if (inst.operands[i].negative)
5357 		{
5358 		  inst.operands[i].negative = 0;
5359 		  p--;
5360 		}
5361 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5362 		return PARSE_OPERAND_FAIL;
5363 	      /* If the offset is 0, find out if it's a +0 or -0.  */
5364 	      if (inst.reloc.exp.X_op == O_constant
5365 		  && inst.reloc.exp.X_add_number == 0)
5366 		{
5367 		  skip_whitespace (q);
5368 		  if (*q == '#')
5369 		    {
5370 		      q++;
5371 		      skip_whitespace (q);
5372 		    }
5373 		  if (*q == '-')
5374 		    inst.operands[i].negative = 1;
5375 		}
5376 	    }
5377 	}
5378     }
5379 
5380   /* If at this point neither .preind nor .postind is set, we have a
5381      bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5382   if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5383     {
5384       inst.operands[i].preind = 1;
5385       inst.reloc.exp.X_op = O_constant;
5386       inst.reloc.exp.X_add_number = 0;
5387     }
5388   *str = p;
5389   return PARSE_OPERAND_SUCCESS;
5390 }
5391 
5392 static int
5393 parse_address (char **str, int i)
5394 {
5395   return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5396          ? SUCCESS : FAIL;
5397 }
5398 
5399 static parse_operand_result
5400 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5401 {
5402   return parse_address_main (str, i, 1, type);
5403 }
5404 
5405 /* Parse an operand for a MOVW or MOVT instruction.  */
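/* For example, "movw r0, #:lower16:sym" and "movt r0, #:upper16:sym"
   are handled here: the prefix selects BFD_RELOC_ARM_MOVW or
   BFD_RELOC_ARM_MOVT and the expression is left for the fixup, while a
   plain "#1234" must be a constant in the range 0-0xffff.  */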
5406 static int
5407 parse_half (char **str)
5408 {
5409   char * p;
5410 
5411   p = *str;
5412   skip_past_char (&p, '#');
5413   if (strncasecmp (p, ":lower16:", 9) == 0)
5414     inst.reloc.type = BFD_RELOC_ARM_MOVW;
5415   else if (strncasecmp (p, ":upper16:", 9) == 0)
5416     inst.reloc.type = BFD_RELOC_ARM_MOVT;
5417 
5418   if (inst.reloc.type != BFD_RELOC_UNUSED)
5419     {
5420       p += 9;
5421       skip_whitespace (p);
5422     }
5423 
5424   if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5425     return FAIL;
5426 
5427   if (inst.reloc.type == BFD_RELOC_UNUSED)
5428     {
5429       if (inst.reloc.exp.X_op != O_constant)
5430 	{
5431 	  inst.error = _("constant expression expected");
5432 	  return FAIL;
5433 	}
5434       if (inst.reloc.exp.X_add_number < 0
5435 	  || inst.reloc.exp.X_add_number > 0xffff)
5436 	{
5437 	  inst.error = _("immediate value out of range");
5438 	  return FAIL;
5439 	}
5440     }
5441   *str = p;
5442   return SUCCESS;
5443 }
5444 
5445 /* Miscellaneous. */
5446 
5447 /* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5448    or a bitmask suitable to be or-ed into the ARM msr instruction.  */
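/* For example, "CPSR_fc" should yield PSR_c | PSR_f and "SPSR_f"
   SPSR_BIT | PSR_f, while on M-profile targets special-register names
   such as "primask" or "basepri" are looked up in arm_v7m_psr_hsh
   instead (illustrative; the exact name set lives in the PSR tables
   defined earlier in this file).  */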
5449 static int
5450 parse_psr (char **str, bfd_boolean lhs)
5451 {
5452   char *p;
5453   unsigned long psr_field;
5454   const struct asm_psr *psr;
5455   char *start;
5456   bfd_boolean is_apsr = FALSE;
5457   bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5458 
5459   /* PR gas/12698:  If the user has specified -march=all then m_profile will
5460      be TRUE, but we want to ignore it in this case as we are building for any
5461      CPU type, including non-m variants.  */
5462   if (selected_cpu.core == arm_arch_any.core)
5463     m_profile = FALSE;
5464 
5465   /* CPSRs and SPSRs can now be lowercase.  This is just a convenience
5466      feature for ease of use and backwards compatibility.  */
5467   p = *str;
5468   if (strncasecmp (p, "SPSR", 4) == 0)
5469     {
5470       if (m_profile)
5471 	goto unsupported_psr;
5472 
5473       psr_field = SPSR_BIT;
5474     }
5475   else if (strncasecmp (p, "CPSR", 4) == 0)
5476     {
5477       if (m_profile)
5478 	goto unsupported_psr;
5479 
5480       psr_field = 0;
5481     }
5482   else if (strncasecmp (p, "APSR", 4) == 0)
5483     {
5484       /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5485 	 and ARMv7-R architecture CPUs.  */
5486       is_apsr = TRUE;
5487       psr_field = 0;
5488     }
5489   else if (m_profile)
5490     {
5491       start = p;
5492       do
5493 	p++;
5494       while (ISALNUM (*p) || *p == '_');
5495 
5496       if (strncasecmp (start, "iapsr", 5) == 0
5497 	  || strncasecmp (start, "eapsr", 5) == 0
5498 	  || strncasecmp (start, "xpsr", 4) == 0
5499 	  || strncasecmp (start, "psr", 3) == 0)
5500 	p = start + strcspn (start, "rR") + 1;
5501 
5502       psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5503                                                   p - start);
5504 
5505       if (!psr)
5506 	return FAIL;
5507 
5508       /* If APSR is being written, a bitfield may be specified.  Note that
5509 	 APSR itself is handled above.  */
5510       if (psr->field <= 3)
5511 	{
5512 	  psr_field = psr->field;
5513 	  is_apsr = TRUE;
5514 	  goto check_suffix;
5515 	}
5516 
5517       *str = p;
5518       /* M-profile MSR instructions have the mask field set to "10", except
5519 	 *PSR variants which modify APSR, which may use a different mask (and
5520 	 have been handled already).  Do that by setting the PSR_f field
5521 	 here.  */
5522       return psr->field | (lhs ? PSR_f : 0);
5523     }
5524   else
5525     goto unsupported_psr;
5526 
5527   p += 4;
5528 check_suffix:
5529   if (*p == '_')
5530     {
5531       /* A suffix follows.  */
5532       p++;
5533       start = p;
5534 
5535       do
5536 	p++;
5537       while (ISALNUM (*p) || *p == '_');
5538 
5539       if (is_apsr)
5540 	{
5541 	  /* APSR uses a notation for bits, rather than fields.  */
5542 	  unsigned int nzcvq_bits = 0;
5543 	  unsigned int g_bit = 0;
5544 	  char *bit;
5545 
5546 	  for (bit = start; bit != p; bit++)
5547 	    {
5548 	      switch (TOLOWER (*bit))
5549 	        {
5550 		case 'n':
5551 		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5552 		  break;
5553 
5554 		case 'z':
5555 		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5556 		  break;
5557 
5558 		case 'c':
5559 		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5560 		  break;
5561 
5562 		case 'v':
5563 		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5564 		  break;
5565 
5566 		case 'q':
5567 		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5568 		  break;
5569 
5570 		case 'g':
5571 		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5572 		  break;
5573 
5574 		default:
5575 		  inst.error = _("unexpected bit specified after APSR");
5576 		  return FAIL;
5577 		}
5578 	    }
5579 
5580 	  if (nzcvq_bits == 0x1f)
5581 	    psr_field |= PSR_f;
5582 
5583 	  if (g_bit == 0x1)
5584 	    {
5585 	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5586 	        {
5587 		  inst.error = _("selected processor does not "
5588 				 "support DSP extension");
5589 		  return FAIL;
5590 		}
5591 
5592 	      psr_field |= PSR_s;
5593 	    }
5594 
5595 	  if ((nzcvq_bits & 0x20) != 0
5596 	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5597 	      || (g_bit & 0x2) != 0)
5598 	    {
5599 	      inst.error = _("bad bitmask specified after APSR");
5600 	      return FAIL;
5601 	    }
5602 	}
5603       else
5604         {
5605 	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5606                                                       p - start);
5607 	  if (!psr)
5608             goto error;
5609 
5610 	  psr_field |= psr->field;
5611 	}
5612     }
5613   else
5614     {
5615       if (ISALNUM (*p))
5616 	goto error;    /* Garbage after "[CS]PSR".  */
5617 
5618       /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5619          is deprecated, but allow it anyway.  */
5620       if (is_apsr && lhs)
5621 	{
5622 	  psr_field |= PSR_f;
5623 	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
5624 		       "deprecated"));
5625 	}
5626       else if (!m_profile)
5627 	/* These bits are never right for M-profile devices: don't set them
5628 	   (only code paths which read/write APSR reach here).  */
5629 	psr_field |= (PSR_c | PSR_f);
5630     }
5631   *str = p;
5632   return psr_field;
5633 
5634  unsupported_psr:
5635   inst.error = _("selected processor does not support requested special "
5636 		 "purpose register");
5637   return FAIL;
5638 
5639  error:
5640   inst.error = _("flag for {c}psr instruction expected");
5641   return FAIL;
5642 }
5643 
5644 /* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5645    value suitable for splatting into the AIF field of the instruction.	*/
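/* For instance, the flags operand of "cpsie if" is "if" and yields
   0x2 | 0x1 == 0x3, while "aif" yields 0x7.  Note that saw_a_flag is set
   for any of the three flags, so only an empty flag list produces the
   "missing CPS flags" error.  */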
5646 
5647 static int
5648 parse_cps_flags (char **str)
5649 {
5650   int val = 0;
5651   int saw_a_flag = 0;
5652   char *s = *str;
5653 
5654   for (;;)
5655     switch (*s++)
5656       {
5657       case '\0': case ',':
5658 	goto done;
5659 
5660       case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5661       case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5662       case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5663 
5664       default:
5665 	inst.error = _("unrecognized CPS flag");
5666 	return FAIL;
5667       }
5668 
5669  done:
5670   if (saw_a_flag == 0)
5671     {
5672       inst.error = _("missing CPS flags");
5673       return FAIL;
5674     }
5675 
5676   *str = s - 1;
5677   return val;
5678 }
5679 
5680 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5681    returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
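/* For instance, the operand of "setend le" yields 1 and that of
   "setend be" yields 0; trailing garbage such as "le2" is rejected.  */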
5682 
5683 static int
5684 parse_endian_specifier (char **str)
5685 {
5686   int little_endian;
5687   char *s = *str;
5688 
5689   if (strncasecmp (s, "BE", 2) == 0)
5690     little_endian = 0;
5691   else if (strncasecmp (s, "LE", 2) == 0)
5692     little_endian = 1;
5693   else
5694     {
5695       inst.error = _("valid endian specifiers are be or le");
5696       return FAIL;
5697     }
5698 
5699   if (ISALNUM (s[2]) || s[2] == '_')
5700     {
5701       inst.error = _("valid endian specifiers are be or le");
5702       return FAIL;
5703     }
5704 
5705   *str = s + 2;
5706   return little_endian;
5707 }
5708 
5709 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a
5710    value suitable for poking into the rotate field of an sxt or sxta
5711    instruction, or FAIL on error.  */
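/* For instance, the "ROR #16" in "sxtb r0, r1, ROR #16" should yield 0x2,
   which is the value of the two-bit rotate field.  */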
5712 
5713 static int
5714 parse_ror (char **str)
5715 {
5716   int rot;
5717   char *s = *str;
5718 
5719   if (strncasecmp (s, "ROR", 3) == 0)
5720     s += 3;
5721   else
5722     {
5723       inst.error = _("missing rotation field after comma");
5724       return FAIL;
5725     }
5726 
5727   if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5728     return FAIL;
5729 
5730   switch (rot)
5731     {
5732     case  0: *str = s; return 0x0;
5733     case  8: *str = s; return 0x1;
5734     case 16: *str = s; return 0x2;
5735     case 24: *str = s; return 0x3;
5736 
5737     default:
5738       inst.error = _("rotation can only be 0, 8, 16, or 24");
5739       return FAIL;
5740     }
5741 }
5742 
5743 /* Parse a conditional code (from conds[] below).  The value returned is in the
5744    range 0 .. 14, or FAIL.  */
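/* For instance, "eq" yields 0x0, "ne" yields 0x1 and "al" yields 0xe,
   as given by the conds[] table.  */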
5745 static int
5746 parse_cond (char **str)
5747 {
5748   char *q;
5749   const struct asm_cond *c;
5750   int n;
5751   /* Condition codes are always 2 characters, so matching up to
5752      3 characters is sufficient.  */
5753   char cond[3];
5754 
5755   q = *str;
5756   n = 0;
5757   while (ISALPHA (*q) && n < 3)
5758     {
5759       cond[n] = TOLOWER (*q);
5760       q++;
5761       n++;
5762     }
5763 
5764   c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5765   if (!c)
5766     {
5767       inst.error = _("condition required");
5768       return FAIL;
5769     }
5770 
5771   *str = q;
5772   return c->value;
5773 }
5774 
5775 /* If the given feature is available in the selected CPU, mark it as used.
5776    Returns TRUE iff feature is available.  */
5777 static bfd_boolean
5778 mark_feature_used (const arm_feature_set *feature)
5779 {
5780   /* Ensure the option is valid on the current architecture.  */
5781   if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
5782     return FALSE;
5783 
5784   /* Add the appropriate architecture feature for the barrier option
5785      used.  */
5786   if (thumb_mode)
5787     ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
5788   else
5789     ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
5790 
5791   return TRUE;
5792 }
5793 
5794 /* Parse an option for a barrier instruction.  Returns the encoding for the
5795    option, or FAIL.  */
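/* For instance, the "sy" in "dmb sy" should encode as 0xf (the only value
   ISB accepts, as the check in po_barrier_or_imm below relies on), and
   "ish" as 0xb; the mapping comes from the barrier_opt_names table
   elsewhere in this file.  */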
5796 static int
5797 parse_barrier (char **str)
5798 {
5799   char *p, *q;
5800   const struct asm_barrier_opt *o;
5801 
5802   p = q = *str;
5803   while (ISALPHA (*q))
5804     q++;
5805 
5806   o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5807                                                     q - p);
5808   if (!o)
5809     return FAIL;
5810 
5811   if (!mark_feature_used (&o->arch))
5812     return FAIL;
5813 
5814   *str = q;
5815   return o->value;
5816 }
5817 
5818 /* Parse the operands of a table branch instruction.  Similar to a memory
5819    operand.  */
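/* For instance, "tbb [r0, r1]" sets operands[0].reg to r0 and
   operands[0].imm to r1, while "tbh [r0, r1, lsl #1]" additionally sets
   operands[0].shifted; any shift amount other than 1 is rejected.  */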
5820 static int
5821 parse_tb (char **str)
5822 {
5823   char * p = *str;
5824   int reg;
5825 
5826   if (skip_past_char (&p, '[') == FAIL)
5827     {
5828       inst.error = _("'[' expected");
5829       return FAIL;
5830     }
5831 
5832   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5833     {
5834       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5835       return FAIL;
5836     }
5837   inst.operands[0].reg = reg;
5838 
5839   if (skip_past_comma (&p) == FAIL)
5840     {
5841       inst.error = _("',' expected");
5842       return FAIL;
5843     }
5844 
5845   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5846     {
5847       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5848       return FAIL;
5849     }
5850   inst.operands[0].imm = reg;
5851 
5852   if (skip_past_comma (&p) == SUCCESS)
5853     {
5854       if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5855 	return FAIL;
5856       if (inst.reloc.exp.X_add_number != 1)
5857 	{
5858 	  inst.error = _("invalid shift");
5859 	  return FAIL;
5860 	}
5861       inst.operands[0].shifted = 1;
5862     }
5863 
5864   if (skip_past_char (&p, ']') == FAIL)
5865     {
5866       inst.error = _("']' expected");
5867       return FAIL;
5868     }
5869   *str = p;
5870   return SUCCESS;
5871 }
5872 
5873 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5874    information on the types the operands can take and how they are encoded.
5875    Up to four operands may be read; this function handles setting the
5876    ".present" field for each read operand itself.
5877    Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5878    else returns FAIL.  */
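/* For instance, "vmov.32 d0[1], r2" is case 4, "vmov d0, d1" is case 1,
   "vmov r0, r1, d2" is case 7 and "vmov.f32 s0, #1.0" is case 10, in the
   numbering used by the comments below and by do_neon_mov.  */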
5879 
5880 static int
5881 parse_neon_mov (char **str, int *which_operand)
5882 {
5883   int i = *which_operand, val;
5884   enum arm_reg_type rtype;
5885   char *ptr = *str;
5886   struct neon_type_el optype;
5887 
5888   if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5889     {
5890       /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
5891       inst.operands[i].reg = val;
5892       inst.operands[i].isscalar = 1;
5893       inst.operands[i].vectype = optype;
5894       inst.operands[i++].present = 1;
5895 
5896       if (skip_past_comma (&ptr) == FAIL)
5897         goto wanted_comma;
5898 
5899       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5900         goto wanted_arm;
5901 
5902       inst.operands[i].reg = val;
5903       inst.operands[i].isreg = 1;
5904       inst.operands[i].present = 1;
5905     }
5906   else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5907            != FAIL)
5908     {
5909       /* Cases 0, 1, 2, 3, 5 (D only).  */
5910       if (skip_past_comma (&ptr) == FAIL)
5911         goto wanted_comma;
5912 
5913       inst.operands[i].reg = val;
5914       inst.operands[i].isreg = 1;
5915       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5916       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5917       inst.operands[i].isvec = 1;
5918       inst.operands[i].vectype = optype;
5919       inst.operands[i++].present = 1;
5920 
5921       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5922         {
5923           /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5924              Case 13: VMOV <Sd>, <Rm>  */
5925           inst.operands[i].reg = val;
5926           inst.operands[i].isreg = 1;
5927           inst.operands[i].present = 1;
5928 
5929           if (rtype == REG_TYPE_NQ)
5930             {
5931               first_error (_("can't use Neon quad register here"));
5932               return FAIL;
5933             }
5934           else if (rtype != REG_TYPE_VFS)
5935             {
5936               i++;
5937               if (skip_past_comma (&ptr) == FAIL)
5938                 goto wanted_comma;
5939               if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5940                 goto wanted_arm;
5941               inst.operands[i].reg = val;
5942               inst.operands[i].isreg = 1;
5943               inst.operands[i].present = 1;
5944             }
5945         }
5946       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5947                                            &optype)) != FAIL)
5948         {
5949           /* Case 0: VMOV<c><q> <Qd>, <Qm>
5950              Case 1: VMOV<c><q> <Dd>, <Dm>
5951              Case 8: VMOV.F32 <Sd>, <Sm>
5952              Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
5953 
5954           inst.operands[i].reg = val;
5955           inst.operands[i].isreg = 1;
5956           inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5957           inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5958           inst.operands[i].isvec = 1;
5959           inst.operands[i].vectype = optype;
5960           inst.operands[i].present = 1;
5961 
5962           if (skip_past_comma (&ptr) == SUCCESS)
5963             {
5964               /* Case 15.  */
5965               i++;
5966 
5967               if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5968                 goto wanted_arm;
5969 
5970               inst.operands[i].reg = val;
5971               inst.operands[i].isreg = 1;
5972               inst.operands[i++].present = 1;
5973 
5974               if (skip_past_comma (&ptr) == FAIL)
5975                 goto wanted_comma;
5976 
5977               if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5978                 goto wanted_arm;
5979 
5980               inst.operands[i].reg = val;
5981               inst.operands[i].isreg = 1;
5982               inst.operands[i].present = 1;
5983             }
5984         }
5985       else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5986           /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5987              Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5988              Case 10: VMOV.F32 <Sd>, #<imm>
5989              Case 11: VMOV.F64 <Dd>, #<imm>  */
5990         inst.operands[i].immisfloat = 1;
5991       else if (parse_big_immediate (&ptr, i) == SUCCESS)
5992           /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5993              Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
5994         ;
5995       else
5996         {
5997           first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5998           return FAIL;
5999         }
6000     }
6001   else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6002     {
6003       /* Cases 6, 7.  */
6004       inst.operands[i].reg = val;
6005       inst.operands[i].isreg = 1;
6006       inst.operands[i++].present = 1;
6007 
6008       if (skip_past_comma (&ptr) == FAIL)
6009         goto wanted_comma;
6010 
6011       if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6012         {
6013           /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
6014           inst.operands[i].reg = val;
6015           inst.operands[i].isscalar = 1;
6016           inst.operands[i].present = 1;
6017           inst.operands[i].vectype = optype;
6018         }
6019       else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6020         {
6021           /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
6022           inst.operands[i].reg = val;
6023           inst.operands[i].isreg = 1;
6024           inst.operands[i++].present = 1;
6025 
6026           if (skip_past_comma (&ptr) == FAIL)
6027             goto wanted_comma;
6028 
6029           if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6030               == FAIL)
6031             {
6032               first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6033               return FAIL;
6034             }
6035 
6036           inst.operands[i].reg = val;
6037           inst.operands[i].isreg = 1;
6038           inst.operands[i].isvec = 1;
6039           inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6040           inst.operands[i].vectype = optype;
6041           inst.operands[i].present = 1;
6042 
6043           if (rtype == REG_TYPE_VFS)
6044             {
6045               /* Case 14.  */
6046               i++;
6047               if (skip_past_comma (&ptr) == FAIL)
6048                 goto wanted_comma;
6049               if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6050                                               &optype)) == FAIL)
6051                 {
6052                   first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6053                   return FAIL;
6054                 }
6055               inst.operands[i].reg = val;
6056               inst.operands[i].isreg = 1;
6057               inst.operands[i].isvec = 1;
6058               inst.operands[i].issingle = 1;
6059               inst.operands[i].vectype = optype;
6060               inst.operands[i].present = 1;
6061             }
6062         }
6063       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6064                != FAIL)
6065         {
6066           /* Case 13.  */
6067           inst.operands[i].reg = val;
6068           inst.operands[i].isreg = 1;
6069           inst.operands[i].isvec = 1;
6070           inst.operands[i].issingle = 1;
6071           inst.operands[i].vectype = optype;
6072           inst.operands[i].present = 1;
6073         }
6074     }
6075   else
6076     {
6077       first_error (_("parse error"));
6078       return FAIL;
6079     }
6080 
6081   /* Successfully parsed the operands. Update args.  */
6082   *which_operand = i;
6083   *str = ptr;
6084   return SUCCESS;
6085 
6086  wanted_comma:
6087   first_error (_("expected comma"));
6088   return FAIL;
6089 
6090  wanted_arm:
6091   first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6092   return FAIL;
6093 }
6094 
6095 /* Use this macro when the operand constraints are different
6096    for ARM and THUMB (e.g. ldrd).  */
6097 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6098 	((arm_operand) | ((thumb_operand) << 16))
6099 
6100 /* Matcher codes for parse_operands.  */
6101 enum operand_parse_code
6102 {
6103   OP_stop,	/* end of line */
6104 
6105   OP_RR,	/* ARM register */
6106   OP_RRnpc,	/* ARM register, not r15 */
6107   OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6108   OP_RRnpcb,	/* ARM register, not r15, in square brackets */
6109   OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
6110 		   optional trailing ! */
6111   OP_RRw,	/* ARM register, not r15, optional trailing ! */
6112   OP_RCP,	/* Coprocessor number */
6113   OP_RCN,	/* Coprocessor register */
6114   OP_RF,	/* FPA register */
6115   OP_RVS,	/* VFP single precision register */
6116   OP_RVD,	/* VFP double precision register (0..15) */
6117   OP_RND,       /* Neon double precision register (0..31) */
6118   OP_RNQ,	/* Neon quad precision register */
6119   OP_RVSD,	/* VFP single or double precision register */
6120   OP_RNDQ,      /* Neon double or quad precision register */
6121   OP_RNSDQ,	/* Neon single, double or quad precision register */
6122   OP_RNSC,      /* Neon scalar D[X] */
6123   OP_RVC,	/* VFP control register */
6124   OP_RMF,	/* Maverick F register */
6125   OP_RMD,	/* Maverick D register */
6126   OP_RMFX,	/* Maverick FX register */
6127   OP_RMDX,	/* Maverick DX register */
6128   OP_RMAX,	/* Maverick AX register */
6129   OP_RMDS,	/* Maverick DSPSC register */
6130   OP_RIWR,	/* iWMMXt wR register */
6131   OP_RIWC,	/* iWMMXt wC register */
6132   OP_RIWG,	/* iWMMXt wCG register */
6133   OP_RXA,	/* XScale accumulator register */
6134 
6135   OP_REGLST,	/* ARM register list */
6136   OP_VRSLST,	/* VFP single-precision register list */
6137   OP_VRDLST,	/* VFP double-precision register list */
6138   OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6139   OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6140   OP_NSTRLST,   /* Neon element/structure list */
6141 
6142   OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6143   OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
6144   OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6145   OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6146   OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6147   OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6148   OP_VMOV,      /* Neon VMOV operands.  */
6149   OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
6150   OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6151   OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6152 
6153   OP_I0,        /* immediate zero */
6154   OP_I7,	/* immediate value 0 .. 7 */
6155   OP_I15,	/*		   0 .. 15 */
6156   OP_I16,	/*		   1 .. 16 */
6157   OP_I16z,      /*                 0 .. 16 */
6158   OP_I31,	/*		   0 .. 31 */
6159   OP_I31w,	/*		   0 .. 31, optional trailing ! */
6160   OP_I32,	/*		   1 .. 32 */
6161   OP_I32z,	/*		   0 .. 32 */
6162   OP_I63,	/*		   0 .. 63 */
6163   OP_I63s,	/*		 -64 .. 63 */
6164   OP_I64,	/*		   1 .. 64 */
6165   OP_I64z,	/*		   0 .. 64 */
6166   OP_I255,	/*		   0 .. 255 */
6167 
6168   OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
6169   OP_I7b,	/*			       0 .. 7 */
6170   OP_I15b,	/*			       0 .. 15 */
6171   OP_I31b,	/*			       0 .. 31 */
6172 
6173   OP_SH,	/* shifter operand */
6174   OP_SHG,	/* shifter operand with possible group relocation */
6175   OP_ADDR,	/* Memory address expression (any mode) */
6176   OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
6177   OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6178   OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6179   OP_EXP,	/* arbitrary expression */
6180   OP_EXPi,	/* same, with optional immediate prefix */
6181   OP_EXPr,	/* same, with optional relocation suffix */
6182   OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
6183 
6184   OP_CPSF,	/* CPS flags */
6185   OP_ENDI,	/* Endianness specifier */
6186   OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
6187   OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
6188   OP_COND,	/* conditional code */
6189   OP_TB,	/* Table branch.  */
6190 
6191   OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6192 
6193   OP_RRnpc_I0,	/* ARM register or literal 0 */
6194   OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
6195   OP_RR_EXi,	/* ARM register or expression with imm prefix */
6196   OP_RF_IF,	/* FPA register or immediate */
6197   OP_RIWR_RIWC, /* iWMMXt R or C reg */
6198   OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6199 
6200   /* Optional operands.	 */
6201   OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
6202   OP_oI31b,	 /*				0 .. 31 */
6203   OP_oI32b,      /*                             1 .. 32 */
6204   OP_oI32z,      /*                             0 .. 32 */
6205   OP_oIffffb,	 /*				0 .. 65535 */
6206   OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
6207 
6208   OP_oRR,	 /* ARM register */
6209   OP_oRRnpc,	 /* ARM register, not the PC */
6210   OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6211   OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
6212   OP_oRND,       /* Optional Neon double precision register */
6213   OP_oRNQ,       /* Optional Neon quad precision register */
6214   OP_oRNDQ,      /* Optional Neon double or quad precision register */
6215   OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
6216   OP_oSHll,	 /* LSL immediate */
6217   OP_oSHar,	 /* ASR immediate */
6218   OP_oSHllar,	 /* LSL or ASR immediate */
6219   OP_oROR,	 /* ROR 0/8/16/24 */
6220   OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6221 
6222   /* Some pre-defined mixed (ARM/THUMB) operands.  */
6223   OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6224   OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6225   OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6226 
6227   OP_FIRST_OPTIONAL = OP_oI7b
6228 };
6229 
6230 /* Generic instruction operand parser.	This does no encoding and no
6231    semantic validation; it merely squirrels values away in the inst
6232    structure.  Returns SUCCESS or FAIL depending on whether the
6233    specified grammar matched.  */
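/* For instance, a pattern such as { OP_RRnpc, OP_oROR, OP_stop }
   (illustrative only; the real patterns come from the insns[] opcode
   table) would accept either "r0" or "r0, ROR #8": when the optional
   trailing operand is absent, the backtracking logic below rewinds and
   marks it as not present.  */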
6234 static int
6235 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6236 {
6237   unsigned const int *upat = pattern;
6238   char *backtrack_pos = 0;
6239   const char *backtrack_error = 0;
6240   int i, val = 0, backtrack_index = 0;
6241   enum arm_reg_type rtype;
6242   parse_operand_result result;
6243   unsigned int op_parse_code;
6244 
6245 #define po_char_or_fail(chr)			\
6246   do						\
6247     {						\
6248       if (skip_past_char (&str, chr) == FAIL)	\
6249         goto bad_args;				\
6250     }						\
6251   while (0)
6252 
6253 #define po_reg_or_fail(regtype)					\
6254   do								\
6255     {								\
6256       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6257   			         & inst.operands[i].vectype);	\
6258       if (val == FAIL)						\
6259         {							\
6260           first_error (_(reg_expected_msgs[regtype]));		\
6261           goto failure;						\
6262         }							\
6263       inst.operands[i].reg = val;				\
6264       inst.operands[i].isreg = 1;				\
6265       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6266       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6267       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6268                              || rtype == REG_TYPE_VFD		\
6269                              || rtype == REG_TYPE_NQ);		\
6270     }								\
6271   while (0)
6272 
6273 #define po_reg_or_goto(regtype, label)				\
6274   do								\
6275     {								\
6276       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6277 				 & inst.operands[i].vectype);	\
6278       if (val == FAIL)						\
6279 	goto label;						\
6280 								\
6281       inst.operands[i].reg = val;				\
6282       inst.operands[i].isreg = 1;				\
6283       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6284       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6285       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6286                              || rtype == REG_TYPE_VFD		\
6287 			     || rtype == REG_TYPE_NQ);		\
6288     }								\
6289   while (0)
6290 
6291 #define po_imm_or_fail(min, max, popt)				\
6292   do								\
6293     {								\
6294       if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
6295 	goto failure;						\
6296       inst.operands[i].imm = val;				\
6297     }								\
6298   while (0)
6299 
6300 #define po_scalar_or_goto(elsz, label)					\
6301   do									\
6302     {									\
6303       val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
6304       if (val == FAIL)							\
6305 	goto label;							\
6306       inst.operands[i].reg = val;					\
6307       inst.operands[i].isscalar = 1;					\
6308     }									\
6309   while (0)
6310 
6311 #define po_misc_or_fail(expr)			\
6312   do						\
6313     {						\
6314       if (expr)					\
6315 	goto failure;				\
6316     }						\
6317   while (0)
6318 
6319 #define po_misc_or_fail_no_backtrack(expr)		\
6320   do							\
6321     {							\
6322       result = expr;					\
6323       if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
6324 	backtrack_pos = 0;				\
6325       if (result != PARSE_OPERAND_SUCCESS)		\
6326 	goto failure;					\
6327     }							\
6328   while (0)
6329 
6330 #define po_barrier_or_imm(str)				   \
6331   do							   \
6332     {						 	   \
6333       val = parse_barrier (&str);			   \
6334       if (val == FAIL)					   \
6335 	{						   \
6336 	  if (ISALPHA (*str))				   \
6337 	      goto failure;				   \
6338 	  else						   \
6339 	      goto immediate;				   \
6340 	}						   \
6341       else						   \
6342 	{						   \
6343 	  if ((inst.instruction & 0xf0) == 0x60		   \
6344 	      && val != 0xf)				   \
6345 	    {						   \
6346 	       /* ISB can only take SY as an option.  */   \
6347 	       inst.error = _("invalid barrier type");	   \
6348 	       goto failure;				   \
6349 	    }						   \
6350 	}						   \
6351     }							   \
6352   while (0)
6353 
6354   skip_whitespace (str);
6355 
6356   for (i = 0; upat[i] != OP_stop; i++)
6357     {
6358       op_parse_code = upat[i];
6359       if (op_parse_code >= 1<<16)
6360 	op_parse_code = thumb ? (op_parse_code >> 16)
6361 				: (op_parse_code & ((1<<16)-1));
6362 
6363       if (op_parse_code >= OP_FIRST_OPTIONAL)
6364 	{
6365 	  /* Remember where we are in case we need to backtrack.  */
6366 	  gas_assert (!backtrack_pos);
6367 	  backtrack_pos = str;
6368 	  backtrack_error = inst.error;
6369 	  backtrack_index = i;
6370 	}
6371 
6372       if (i > 0 && (i > 1 || inst.operands[0].present))
6373 	po_char_or_fail (',');
6374 
6375       switch (op_parse_code)
6376 	{
6377 	  /* Registers */
6378 	case OP_oRRnpc:
6379 	case OP_oRRnpcsp:
6380 	case OP_RRnpc:
6381 	case OP_RRnpcsp:
6382 	case OP_oRR:
6383 	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
6384 	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
6385 	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
6386 	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
6387 	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
6388 	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6389         case OP_oRND:
6390 	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6391 	case OP_RVC:
6392 	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6393 	  break;
6394 	  /* Also accept generic coprocessor regs for unknown registers.  */
6395 	  coproc_reg:
6396 	  po_reg_or_fail (REG_TYPE_CN);
6397 	  break;
6398 	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
6399 	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
6400 	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
6401 	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
6402 	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
6403 	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
6404 	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
6405 	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
6406 	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6407 	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6408         case OP_oRNQ:
6409 	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6410         case OP_oRNDQ:
6411 	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6412         case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6413         case OP_oRNSDQ:
6414         case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6415 
6416         /* Neon scalar. Using an element size of 8 means that some invalid
6417            scalars are accepted here, so deal with those in later code.  */
6418         case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6419 
6420         case OP_RNDQ_I0:
6421           {
6422             po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6423             break;
6424             try_imm0:
6425             po_imm_or_fail (0, 0, TRUE);
6426           }
6427           break;
6428 
6429         case OP_RVSD_I0:
6430           po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6431           break;
6432 
6433         case OP_RR_RNSC:
6434           {
6435             po_scalar_or_goto (8, try_rr);
6436             break;
6437             try_rr:
6438             po_reg_or_fail (REG_TYPE_RN);
6439           }
6440           break;
6441 
6442         case OP_RNSDQ_RNSC:
6443           {
6444             po_scalar_or_goto (8, try_nsdq);
6445             break;
6446             try_nsdq:
6447             po_reg_or_fail (REG_TYPE_NSDQ);
6448           }
6449           break;
6450 
6451         case OP_RNDQ_RNSC:
6452           {
6453             po_scalar_or_goto (8, try_ndq);
6454             break;
6455             try_ndq:
6456             po_reg_or_fail (REG_TYPE_NDQ);
6457           }
6458           break;
6459 
6460         case OP_RND_RNSC:
6461           {
6462             po_scalar_or_goto (8, try_vfd);
6463             break;
6464             try_vfd:
6465             po_reg_or_fail (REG_TYPE_VFD);
6466           }
6467           break;
6468 
6469         case OP_VMOV:
6470           /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6471              not careful then bad things might happen.  */
6472           po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6473           break;
6474 
6475         case OP_RNDQ_Ibig:
6476           {
6477             po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6478             break;
6479             try_immbig:
6480             /* There's a possibility of getting a 64-bit immediate here, so
6481                we need special handling.  */
6482             if (parse_big_immediate (&str, i) == FAIL)
6483               {
6484                 inst.error = _("immediate value is out of range");
6485                 goto failure;
6486               }
6487           }
6488           break;
6489 
6490         case OP_RNDQ_I63b:
6491           {
6492             po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6493             break;
6494             try_shimm:
6495             po_imm_or_fail (0, 63, TRUE);
6496           }
6497           break;
6498 
6499 	case OP_RRnpcb:
6500 	  po_char_or_fail ('[');
6501 	  po_reg_or_fail  (REG_TYPE_RN);
6502 	  po_char_or_fail (']');
6503 	  break;
6504 
6505 	case OP_RRnpctw:
6506 	case OP_RRw:
6507 	case OP_oRRw:
6508 	  po_reg_or_fail (REG_TYPE_RN);
6509 	  if (skip_past_char (&str, '!') == SUCCESS)
6510 	    inst.operands[i].writeback = 1;
6511 	  break;
6512 
6513 	  /* Immediates */
6514 	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
6515 	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
6516 	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
6517         case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
6518 	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
6519 	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
6520         case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
6521 	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
6522         case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
6523         case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
6524         case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
6525 	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
6526 
6527 	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
6528 	case OP_oI7b:
6529 	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
6530 	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
6531 	case OP_oI31b:
6532 	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
6533         case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6534         case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6535 	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
6536 
6537 	  /* Immediate variants */
6538 	case OP_oI255c:
6539 	  po_char_or_fail ('{');
6540 	  po_imm_or_fail (0, 255, TRUE);
6541 	  po_char_or_fail ('}');
6542 	  break;
6543 
6544 	case OP_I31w:
6545 	  /* The expression parser chokes on a trailing !, so we have
6546 	     to find it first and zap it.  */
6547 	  {
6548 	    char *s = str;
6549 	    while (*s && *s != ',')
6550 	      s++;
6551 	    if (s[-1] == '!')
6552 	      {
6553 		s[-1] = '\0';
6554 		inst.operands[i].writeback = 1;
6555 	      }
6556 	    po_imm_or_fail (0, 31, TRUE);
6557 	    if (str == s - 1)
6558 	      str = s;
6559 	  }
6560 	  break;
6561 
6562 	  /* Expressions */
6563 	case OP_EXPi:	EXPi:
6564 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6565 					      GE_OPT_PREFIX));
6566 	  break;
6567 
6568 	case OP_EXP:
6569 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6570 					      GE_NO_PREFIX));
6571 	  break;
6572 
6573 	case OP_EXPr:	EXPr:
6574 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6575 					      GE_NO_PREFIX));
6576 	  if (inst.reloc.exp.X_op == O_symbol)
6577 	    {
6578 	      val = parse_reloc (&str);
6579 	      if (val == -1)
6580 		{
6581 		  inst.error = _("unrecognized relocation suffix");
6582 		  goto failure;
6583 		}
6584 	      else if (val != BFD_RELOC_UNUSED)
6585 		{
6586 		  inst.operands[i].imm = val;
6587 		  inst.operands[i].hasreloc = 1;
6588 		}
6589 	    }
6590 	  break;
6591 
6592 	  /* Operand for MOVW or MOVT.  */
6593 	case OP_HALF:
6594 	  po_misc_or_fail (parse_half (&str));
6595 	  break;
6596 
6597 	  /* Register or expression.  */
6598 	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6599 	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6600 
6601 	  /* Register or immediate.  */
6602 	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6603 	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
6604 
6605 	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6606 	IF:
6607 	  if (!is_immediate_prefix (*str))
6608 	    goto bad_args;
6609 	  str++;
6610 	  val = parse_fpa_immediate (&str);
6611 	  if (val == FAIL)
6612 	    goto failure;
6613 	  /* FPA immediates are encoded as registers 8-15.
6614 	     parse_fpa_immediate has already applied the offset.  */
6615 	  inst.operands[i].reg = val;
6616 	  inst.operands[i].isreg = 1;
6617 	  break;
6618 
6619 	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6620 	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
6621 
6622 	  /* Two kinds of register.  */
6623 	case OP_RIWR_RIWC:
6624 	  {
6625 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6626 	    if (!rege
6627 		|| (rege->type != REG_TYPE_MMXWR
6628 		    && rege->type != REG_TYPE_MMXWC
6629 		    && rege->type != REG_TYPE_MMXWCG))
6630 	      {
6631 		inst.error = _("iWMMXt data or control register expected");
6632 		goto failure;
6633 	      }
6634 	    inst.operands[i].reg = rege->number;
6635 	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6636 	  }
6637 	  break;
6638 
6639 	case OP_RIWC_RIWG:
6640 	  {
6641 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6642 	    if (!rege
6643 		|| (rege->type != REG_TYPE_MMXWC
6644 		    && rege->type != REG_TYPE_MMXWCG))
6645 	      {
6646 		inst.error = _("iWMMXt control register expected");
6647 		goto failure;
6648 	      }
6649 	    inst.operands[i].reg = rege->number;
6650 	    inst.operands[i].isreg = 1;
6651 	  }
6652 	  break;
6653 
6654 	  /* Misc */
6655 	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
6656 	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
6657 	case OP_oROR:	 val = parse_ror (&str);		break;
6658 	case OP_COND:	 val = parse_cond (&str);		break;
6659 	case OP_oBARRIER_I15:
6660 	  po_barrier_or_imm (str); break;
6661 	  immediate:
6662 	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6663             goto failure;
6664 	  break;
6665 
6666 	case OP_wPSR:
6667 	case OP_rPSR:
6668 	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
6669 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6670 	    {
6671 	      inst.error = _("Banked registers are not available with this "
6672 			     "architecture.");
6673 	      goto failure;
6674 	    }
6675 	  break;
6676 	  try_psr:
6677 	  val = parse_psr (&str, op_parse_code == OP_wPSR);
6678 	  break;
6679 
6680         case OP_APSR_RR:
6681           po_reg_or_goto (REG_TYPE_RN, try_apsr);
6682           break;
6683           try_apsr:
6684           /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
6685              instruction).  */
6686           if (strncasecmp (str, "APSR_", 5) == 0)
6687             {
6688               unsigned found = 0;
6689               str += 5;
6690               while (found < 15)
6691                 switch (*str++)
6692                   {
6693                   case 'c': found = (found & 1) ? 16 : found | 1; break;
6694                   case 'n': found = (found & 2) ? 16 : found | 2; break;
6695                   case 'z': found = (found & 4) ? 16 : found | 4; break;
6696                   case 'v': found = (found & 8) ? 16 : found | 8; break;
6697                   default: found = 16;
6698                   }
6699               if (found != 15)
6700                 goto failure;
6701               inst.operands[i].isvec = 1;
6702 	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
6703 	      inst.operands[i].reg = REG_PC;
6704             }
6705           else
6706             goto failure;
6707           break;
6708 
6709 	case OP_TB:
6710 	  po_misc_or_fail (parse_tb (&str));
6711 	  break;
6712 
6713 	  /* Register lists.  */
6714 	case OP_REGLST:
6715 	  val = parse_reg_list (&str);
6716 	  if (*str == '^')
6717 	    {
6718 	      inst.operands[1].writeback = 1;
6719 	      str++;
6720 	    }
6721 	  break;
6722 
6723 	case OP_VRSLST:
6724 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6725 	  break;
6726 
6727 	case OP_VRDLST:
6728 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6729 	  break;
6730 
6731         case OP_VRSDLST:
6732           /* Allow Q registers too.  */
6733           val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6734                                     REGLIST_NEON_D);
6735           if (val == FAIL)
6736             {
6737               inst.error = NULL;
6738               val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6739                                         REGLIST_VFP_S);
6740               inst.operands[i].issingle = 1;
6741             }
6742           break;
6743 
6744         case OP_NRDLST:
6745           val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6746                                     REGLIST_NEON_D);
6747           break;
6748 
6749 	case OP_NSTRLST:
6750           val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6751                                            &inst.operands[i].vectype);
6752           break;
6753 
6754 	  /* Addressing modes */
6755 	case OP_ADDR:
6756 	  po_misc_or_fail (parse_address (&str, i));
6757 	  break;
6758 
6759 	case OP_ADDRGLDR:
6760 	  po_misc_or_fail_no_backtrack (
6761             parse_address_group_reloc (&str, i, GROUP_LDR));
6762 	  break;
6763 
6764 	case OP_ADDRGLDRS:
6765 	  po_misc_or_fail_no_backtrack (
6766             parse_address_group_reloc (&str, i, GROUP_LDRS));
6767 	  break;
6768 
6769 	case OP_ADDRGLDC:
6770 	  po_misc_or_fail_no_backtrack (
6771             parse_address_group_reloc (&str, i, GROUP_LDC));
6772 	  break;
6773 
6774 	case OP_SH:
6775 	  po_misc_or_fail (parse_shifter_operand (&str, i));
6776 	  break;
6777 
6778 	case OP_SHG:
6779 	  po_misc_or_fail_no_backtrack (
6780             parse_shifter_operand_group_reloc (&str, i));
6781 	  break;
6782 
6783 	case OP_oSHll:
6784 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6785 	  break;
6786 
6787 	case OP_oSHar:
6788 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6789 	  break;
6790 
6791 	case OP_oSHllar:
6792 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6793 	  break;
6794 
6795 	default:
6796 	  as_fatal (_("unhandled operand code %d"), op_parse_code);
6797 	}
6798 
6799       /* Various value-based sanity checks and shared operations.  We
6800 	 do not signal immediate failures for the register constraints;
6801 	 this allows a syntax error to take precedence.	 */
6802       switch (op_parse_code)
6803 	{
6804 	case OP_oRRnpc:
6805 	case OP_RRnpc:
6806 	case OP_RRnpcb:
6807 	case OP_RRw:
6808 	case OP_oRRw:
6809 	case OP_RRnpc_I0:
6810 	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6811 	    inst.error = BAD_PC;
6812 	  break;
6813 
6814 	case OP_oRRnpcsp:
6815 	case OP_RRnpcsp:
6816 	  if (inst.operands[i].isreg)
6817 	    {
6818 	      if (inst.operands[i].reg == REG_PC)
6819 		inst.error = BAD_PC;
6820 	      else if (inst.operands[i].reg == REG_SP)
6821 		inst.error = BAD_SP;
6822 	    }
6823 	  break;
6824 
6825 	case OP_RRnpctw:
6826 	  if (inst.operands[i].isreg
6827 	      && inst.operands[i].reg == REG_PC
6828 	      && (inst.operands[i].writeback || thumb))
6829 	    inst.error = BAD_PC;
6830 	  break;
6831 
6832 	case OP_CPSF:
6833 	case OP_ENDI:
6834 	case OP_oROR:
6835 	case OP_wPSR:
6836 	case OP_rPSR:
6837 	case OP_COND:
6838 	case OP_oBARRIER_I15:
6839 	case OP_REGLST:
6840 	case OP_VRSLST:
6841 	case OP_VRDLST:
6842         case OP_VRSDLST:
6843         case OP_NRDLST:
6844         case OP_NSTRLST:
6845 	  if (val == FAIL)
6846 	    goto failure;
6847 	  inst.operands[i].imm = val;
6848 	  break;
6849 
6850 	default:
6851 	  break;
6852 	}
6853 
6854       /* If we get here, this operand was successfully parsed.	*/
6855       inst.operands[i].present = 1;
6856       continue;
6857 
6858     bad_args:
6859       inst.error = BAD_ARGS;
6860 
6861     failure:
6862       if (!backtrack_pos)
6863 	{
6864 	  /* The parse routine should already have set inst.error, but set a
6865 	     default here just in case.  */
6866 	  if (!inst.error)
6867 	    inst.error = _("syntax error");
6868 	  return FAIL;
6869 	}
6870 
6871       /* Do not backtrack over a trailing optional argument that
6872 	 absorbed some text.  We will only fail again, with the
6873 	 'garbage following instruction' error message, which is
6874 	 probably less helpful than the current one.  */
6875       if (backtrack_index == i && backtrack_pos != str
6876 	  && upat[i+1] == OP_stop)
6877 	{
6878 	  if (!inst.error)
6879 	    inst.error = _("syntax error");
6880 	  return FAIL;
6881 	}
6882 
6883       /* Try again, skipping the optional argument at backtrack_pos.  */
6884       str = backtrack_pos;
6885       inst.error = backtrack_error;
6886       inst.operands[backtrack_index].present = 0;
6887       i = backtrack_index;
6888       backtrack_pos = 0;
6889     }
6890 
6891   /* Check that we have parsed all the arguments.  */
6892   if (*str != '\0' && !inst.error)
6893     inst.error = _("garbage following instruction");
6894 
6895   return inst.error ? FAIL : SUCCESS;
6896 }
6897 
6898 #undef po_char_or_fail
6899 #undef po_reg_or_fail
6900 #undef po_reg_or_goto
6901 #undef po_imm_or_fail
6902 #undef po_scalar_or_goto
6903 #undef po_barrier_or_imm
6904 
6905 /* Shorthand macro for instruction encoding functions issuing errors.  */
6906 #define constraint(expr, err)			\
6907   do						\
6908     {						\
6909       if (expr)					\
6910 	{					\
6911 	  inst.error = err;			\
6912 	  return;				\
6913 	}					\
6914     }						\
6915   while (0)
6916 
6917 /* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
6918    instructions are unpredictable if these registers are used.  This
6919    is the BadReg predicate in ARM's Thumb-2 documentation.  */
6920 #define reject_bad_reg(reg)				\
6921   do							\
6922    if (reg == REG_SP || reg == REG_PC)			\
6923      {							\
6924        inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
6925        return;						\
6926      }							\
6927   while (0)
6928 
6929 /* If REG is R13 (the stack pointer), warn that its use is
6930    deprecated.  */
6931 #define warn_deprecated_sp(reg)			\
6932   do						\
6933     if (warn_on_deprecated && reg == REG_SP)	\
6934        as_warn (_("use of r13 is deprecated"));	\
6935   while (0)
6936 
6937 /* Functions for operand encoding.  ARM, then Thumb.  */
6938 
6939 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
6940 
6941 /* If VAL can be encoded in the immediate field of an ARM instruction,
6942    return the encoded form.  Otherwise, return FAIL.  */
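/* For instance, encode_arm_immediate (0xff000000) should return 0x4ff:
   the 8-bit constant 0xff with a rotate field of 4 (i.e. rotate right by
   8).  A value such as 0x101 needs more than eight significant bits under
   any even rotation and therefore yields FAIL.  */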
6943 
6944 static unsigned int
6945 encode_arm_immediate (unsigned int val)
6946 {
6947   unsigned int a, i;
6948 
6949   for (i = 0; i < 32; i += 2)
6950     if ((a = rotate_left (val, i)) <= 0xff)
6951       return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
6952 
6953   return FAIL;
6954 }
6955 
6956 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6957    return the encoded form.  Otherwise, return FAIL.  */
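/* For instance, 0xab is returned unchanged, 0x00cd00cd should encode as
   0x1cd and 0xabababab as 0x3ab (the replicated-byte forms), and
   0xff000000 as 0x47f via the rotated 8-bit form.  */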
6958 static unsigned int
6959 encode_thumb32_immediate (unsigned int val)
6960 {
6961   unsigned int a, i;
6962 
6963   if (val <= 0xff)
6964     return val;
6965 
6966   for (i = 1; i <= 24; i++)
6967     {
6968       a = val >> i;
6969       if ((val & ~(0xff << i)) == 0)
6970 	return ((val >> i) & 0x7f) | ((32 - i) << 7);
6971     }
6972 
6973   a = val & 0xff;
6974   if (val == ((a << 16) | a))
6975     return 0x100 | a;
6976   if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6977     return 0x300 | a;
6978 
6979   a = val & 0xff00;
6980   if (val == ((a << 16) | a))
6981     return 0x200 | (a >> 8);
6982 
6983   return FAIL;
6984 }
6985 /* Encode a VFP SP or DP register number into inst.instruction.  */
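/* For instance, s5 in the Sd position sets the Vd field to 2 and the D bit
   (bit 22), i.e. ((5 >> 1) << 12) | ((5 & 1) << 22); a register such as
   d17 in a D position is only accepted when fpu_vfp_ext_d32 is
   available.  */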
6986 
6987 static void
6988 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6989 {
6990   if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6991       && reg > 15)
6992     {
6993       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6994         {
6995           if (thumb_mode)
6996             ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6997                                     fpu_vfp_ext_d32);
6998           else
6999             ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7000                                     fpu_vfp_ext_d32);
7001         }
7002       else
7003         {
7004           first_error (_("D register out of range for selected VFP version"));
7005           return;
7006         }
7007     }
7008 
7009   switch (pos)
7010     {
7011     case VFP_REG_Sd:
7012       inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7013       break;
7014 
7015     case VFP_REG_Sn:
7016       inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7017       break;
7018 
7019     case VFP_REG_Sm:
7020       inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7021       break;
7022 
7023     case VFP_REG_Dd:
7024       inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7025       break;
7026 
7027     case VFP_REG_Dn:
7028       inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7029       break;
7030 
7031     case VFP_REG_Dm:
7032       inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7033       break;
7034 
7035     default:
7036       abort ();
7037     }
7038 }
7039 
7040 /* Encode a <shift> in an ARM-format instruction.  The immediate,
7041    if any, is handled by md_apply_fix.	 */
7042 static void
7043 encode_arm_shift (int i)
7044 {
7045   if (inst.operands[i].shift_kind == SHIFT_RRX)
7046     inst.instruction |= SHIFT_ROR << 5;
7047   else
7048     {
7049       inst.instruction |= inst.operands[i].shift_kind << 5;
7050       if (inst.operands[i].immisreg)
7051 	{
7052 	  inst.instruction |= SHIFT_BY_REG;
7053 	  inst.instruction |= inst.operands[i].imm << 8;
7054 	}
7055       else
7056 	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7057     }
7058 }
7059 
7060 static void
7061 encode_arm_shifter_operand (int i)
7062 {
7063   if (inst.operands[i].isreg)
7064     {
7065       inst.instruction |= inst.operands[i].reg;
7066       encode_arm_shift (i);
7067     }
7068   else
7069     {
7070       inst.instruction |= INST_IMMEDIATE;
7071       if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7072 	inst.instruction |= inst.operands[i].imm;
7073     }
7074 }
7075 
7076 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7077 static void
7078 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7079 {
7080   /* PR 14260:
7081      Generate an error if the operand is not a register.  */
7082   constraint (!inst.operands[i].isreg,
7083 	      _("Instruction does not support =N addresses"));
7084 
7085   inst.instruction |= inst.operands[i].reg << 16;
7086 
7087   if (inst.operands[i].preind)
7088     {
7089       if (is_t)
7090 	{
7091 	  inst.error = _("instruction does not accept preindexed addressing");
7092 	  return;
7093 	}
7094       inst.instruction |= PRE_INDEX;
7095       if (inst.operands[i].writeback)
7096 	inst.instruction |= WRITE_BACK;
7097 
7098     }
7099   else if (inst.operands[i].postind)
7100     {
7101       gas_assert (inst.operands[i].writeback);
7102       if (is_t)
7103 	inst.instruction |= WRITE_BACK;
7104     }
7105   else /* unindexed - only for coprocessor */
7106     {
7107       inst.error = _("instruction does not accept unindexed addressing");
7108       return;
7109     }
7110 
7111   if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7112       && (((inst.instruction & 0x000f0000) >> 16)
7113 	  == ((inst.instruction & 0x0000f000) >> 12)))
7114     as_warn ((inst.instruction & LOAD_BIT)
7115 	     ? _("destination register same as write-back base")
7116 	     : _("source register same as write-back base"));
7117 }
7118 
7119 /* inst.operands[i] was set up by parse_address.  Encode it into an
7120    ARM-format mode 2 load or store instruction.	 If is_t is true,
7121    reject forms that cannot be used with a T instruction (i.e. not
7122    post-indexed).  */
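/* For instance, this accepts forms such as [r1, #4] (pre-indexed
   immediate), [r1, r2, lsl #2]! (scaled register with write-back) and
   [r1], #4 (post-indexed, the only form permitted when is_t is TRUE).  */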
7123 static void
7124 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7125 {
7126   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7127 
7128   encode_arm_addr_mode_common (i, is_t);
7129 
7130   if (inst.operands[i].immisreg)
7131     {
7132       constraint ((inst.operands[i].imm == REG_PC
7133 		   || (is_pc && inst.operands[i].writeback)),
7134 		  BAD_PC_ADDRESSING);
7135       inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7136       inst.instruction |= inst.operands[i].imm;
7137       if (!inst.operands[i].negative)
7138 	inst.instruction |= INDEX_UP;
7139       if (inst.operands[i].shifted)
7140 	{
7141 	  if (inst.operands[i].shift_kind == SHIFT_RRX)
7142 	    inst.instruction |= SHIFT_ROR << 5;
7143 	  else
7144 	    {
7145 	      inst.instruction |= inst.operands[i].shift_kind << 5;
7146 	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7147 	    }
7148 	}
7149     }
7150   else /* immediate offset in inst.reloc */
7151     {
7152       if (is_pc && !inst.reloc.pc_rel)
7153 	{
7154 	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7155 
7156 	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7157 	     cannot use PC in addressing.
7158 	     PC cannot be used in writeback addressing, either.  */
7159 	  constraint ((is_t || inst.operands[i].writeback),
7160 		      BAD_PC_ADDRESSING);
7161 
7162 	  /* Use of PC in str is deprecated for ARMv7.  */
7163 	  if (warn_on_deprecated
7164 	      && !is_load
7165 	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7166 	    as_warn (_("use of PC in this instruction is deprecated"));
7167 	}
7168 
7169       if (inst.reloc.type == BFD_RELOC_UNUSED)
7170 	{
7171 	  /* Prefer + for zero encoded value.  */
7172 	  if (!inst.operands[i].negative)
7173 	    inst.instruction |= INDEX_UP;
7174 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7175 	}
7176     }
7177 }
7178 
7179 /* inst.operands[i] was set up by parse_address.  Encode it into an
7180    ARM-format mode 3 load or store instruction.	 Reject forms that
7181    cannot be used with such instructions.  If is_t is true, reject
7182    forms that cannot be used with a T instruction (i.e. not
7183    post-indexed).  */
7184 static void
7185 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7186 {
7187   if (inst.operands[i].immisreg && inst.operands[i].shifted)
7188     {
7189       inst.error = _("instruction does not accept scaled register index");
7190       return;
7191     }
7192 
7193   encode_arm_addr_mode_common (i, is_t);
7194 
7195   if (inst.operands[i].immisreg)
7196     {
7197       constraint ((inst.operands[i].imm == REG_PC
7198 		   || inst.operands[i].reg == REG_PC),
7199 		  BAD_PC_ADDRESSING);
7200       inst.instruction |= inst.operands[i].imm;
7201       if (!inst.operands[i].negative)
7202 	inst.instruction |= INDEX_UP;
7203     }
7204   else /* immediate offset in inst.reloc */
7205     {
7206       constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7207 		   && inst.operands[i].writeback),
7208 		  BAD_PC_WRITEBACK);
7209       inst.instruction |= HWOFFSET_IMM;
7210       if (inst.reloc.type == BFD_RELOC_UNUSED)
7211 	{
7212 	  /* Prefer + for zero encoded value.  */
7213 	  if (!inst.operands[i].negative)
7214 	    inst.instruction |= INDEX_UP;
7215 
7216 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7217 	}
7218     }
7219 }
7220 
7221 /* inst.operands[i] was set up by parse_address.  Encode it into an
7222    ARM-format instruction.  Reject all forms which cannot be encoded
7223    into a coprocessor load/store instruction.  If wb_ok is false,
7224    reject use of writeback; if unind_ok is false, reject use of
7225    unindexed addressing.  If reloc_override is not 0, use it instead
7226    of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7227    (in which case it is preserved).  */
7228 
7229 static int
7230 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7231 {
7232   inst.instruction |= inst.operands[i].reg << 16;
7233 
7234   gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7235 
7236   if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7237     {
7238       gas_assert (!inst.operands[i].writeback);
7239       if (!unind_ok)
7240 	{
7241 	  inst.error = _("instruction does not support unindexed addressing");
7242 	  return FAIL;
7243 	}
7244       inst.instruction |= inst.operands[i].imm;
7245       inst.instruction |= INDEX_UP;
7246       return SUCCESS;
7247     }
7248 
7249   if (inst.operands[i].preind)
7250     inst.instruction |= PRE_INDEX;
7251 
7252   if (inst.operands[i].writeback)
7253     {
7254       if (inst.operands[i].reg == REG_PC)
7255 	{
7256 	  inst.error = _("pc may not be used with write-back");
7257 	  return FAIL;
7258 	}
7259       if (!wb_ok)
7260 	{
7261 	  inst.error = _("instruction does not support writeback");
7262 	  return FAIL;
7263 	}
7264       inst.instruction |= WRITE_BACK;
7265     }
7266 
7267   if (reloc_override)
7268     inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7269   else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7270             || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7271            && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7272     {
7273       if (thumb_mode)
7274         inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7275       else
7276         inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7277     }
7278 
7279   /* Prefer + for zero encoded value.  */
7280   if (!inst.operands[i].negative)
7281     inst.instruction |= INDEX_UP;
7282 
7283   return SUCCESS;
7284 }
7285 
7286 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7287    Determine whether it can be performed with a move instruction; if
7288    it can, convert inst.instruction to that move instruction and
7289    return TRUE; if it can't, convert inst.instruction to a literal-pool
7290    load and return FALSE.  If this is not a valid thing to do in the
7291    current context, set inst.error and return TRUE.
7292 
7293    inst.operands[i] describes the destination register.	 */
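/* For example, "ldr r0, =1" can be turned into "mov r0, #1" and
   "ldr r0, =0xffffffff" into "mvn r0, #0", because both constants fit
   the 8-bit-rotated immediate form; a constant such as 0x12345678 does
   not, so it is placed in the literal pool and the instruction becomes
   a PC-relative load.  */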
7294 
7295 static bfd_boolean
7296 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7297 {
7298   unsigned long tbit;
7299 
7300   if (thumb_p)
7301     tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7302   else
7303     tbit = LOAD_BIT;
7304 
7305   if ((inst.instruction & tbit) == 0)
7306     {
7307       inst.error = _("invalid pseudo operation");
7308       return TRUE;
7309     }
7310   if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7311     {
7312       inst.error = _("constant expression expected");
7313       return TRUE;
7314     }
7315   if (inst.reloc.exp.X_op == O_constant)
7316     {
7317       if (thumb_p)
7318 	{
7319 	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7320 	    {
7321 	      /* This can be done with a mov(1) instruction.  */
7322 	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7323 	      inst.instruction |= inst.reloc.exp.X_add_number;
7324 	      return TRUE;
7325 	    }
7326 	}
7327       else
7328 	{
7329 	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7330 	  if (value != FAIL)
7331 	    {
7332 	      /* This can be done with a mov instruction.  */
7333 	      inst.instruction &= LITERAL_MASK;
7334 	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7335 	      inst.instruction |= value & 0xfff;
7336 	      return TRUE;
7337 	    }
7338 
7339 	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7340 	  if (value != FAIL)
7341 	    {
7342 	      /* This can be done with a mvn instruction.  */
7343 	      inst.instruction &= LITERAL_MASK;
7344 	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7345 	      inst.instruction |= value & 0xfff;
7346 	      return TRUE;
7347 	    }
7348 	}
7349     }
7350 
7351   if (add_to_lit_pool () == FAIL)
7352     {
7353       inst.error = _("literal pool insertion failed");
7354       return TRUE;
7355     }
7356   inst.operands[1].reg = REG_PC;
7357   inst.operands[1].isreg = 1;
7358   inst.operands[1].preind = 1;
7359   inst.reloc.pc_rel = 1;
7360   inst.reloc.type = (thumb_p
7361 		     ? BFD_RELOC_ARM_THUMB_OFFSET
7362 		     : (mode_3
7363 			? BFD_RELOC_ARM_HWLITERAL
7364 			: BFD_RELOC_ARM_LITERAL));
7365   return FALSE;
7366 }
7367 
7368 /* Functions for instruction encoding, sorted by sub-architecture.
7369    First some generics; their names are taken from the conventional
7370    bit positions for register arguments in ARM format instructions.  */
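/* In those conventional positions Rd occupies bits 12-15, Rn bits
   16-19, Rm bits 0-3 and Rs bits 8-11, which is where the shift counts
   in the helpers below come from.  */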
7371 
7372 static void
7373 do_noargs (void)
7374 {
7375 }
7376 
7377 static void
7378 do_rd (void)
7379 {
7380   inst.instruction |= inst.operands[0].reg << 12;
7381 }
7382 
7383 static void
7384 do_rd_rm (void)
7385 {
7386   inst.instruction |= inst.operands[0].reg << 12;
7387   inst.instruction |= inst.operands[1].reg;
7388 }
7389 
7390 static void
7391 do_rm_rn (void)
7392 {
7393   inst.instruction |= inst.operands[0].reg;
7394   inst.instruction |= inst.operands[1].reg << 16;
7395 }
7396 
7397 static void
7398 do_rd_rn (void)
7399 {
7400   inst.instruction |= inst.operands[0].reg << 12;
7401   inst.instruction |= inst.operands[1].reg << 16;
7402 }
7403 
7404 static void
7405 do_rn_rd (void)
7406 {
7407   inst.instruction |= inst.operands[0].reg << 16;
7408   inst.instruction |= inst.operands[1].reg << 12;
7409 }
7410 
7411 static bfd_boolean
7412 check_obsolete (const arm_feature_set *feature, const char *msg)
7413 {
7414   if (ARM_CPU_IS_ANY (cpu_variant))
7415     {
7416       as_warn ("%s", msg);
7417       return TRUE;
7418     }
7419   else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7420     {
7421       as_bad ("%s", msg);
7422       return TRUE;
7423     }
7424 
7425   return FALSE;
7426 }
7427 
7428 static void
7429 do_rd_rm_rn (void)
7430 {
7431   unsigned Rn = inst.operands[2].reg;
7432   /* Enforce restrictions on SWP instruction.  */
7433   if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7434     {
7435       constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7436 		  _("Rn must not overlap other operands"));
7437 
7438       /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
7439        */
7440       if (!check_obsolete (&arm_ext_v8,
7441 			   _("swp{b} use is obsoleted for ARMv8 and later"))
7442 	  && warn_on_deprecated
7443 	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
7444 	as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
7445     }
7446 
7447   inst.instruction |= inst.operands[0].reg << 12;
7448   inst.instruction |= inst.operands[1].reg;
7449   inst.instruction |= Rn << 16;
7450 }
7451 
7452 static void
7453 do_rd_rn_rm (void)
7454 {
7455   inst.instruction |= inst.operands[0].reg << 12;
7456   inst.instruction |= inst.operands[1].reg << 16;
7457   inst.instruction |= inst.operands[2].reg;
7458 }
7459 
7460 static void
7461 do_rm_rd_rn (void)
7462 {
7463   constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7464   constraint (((inst.reloc.exp.X_op != O_constant
7465 		&& inst.reloc.exp.X_op != O_illegal)
7466 	       || inst.reloc.exp.X_add_number != 0),
7467 	      BAD_ADDR_MODE);
7468   inst.instruction |= inst.operands[0].reg;
7469   inst.instruction |= inst.operands[1].reg << 12;
7470   inst.instruction |= inst.operands[2].reg << 16;
7471 }
7472 
7473 static void
7474 do_imm0 (void)
7475 {
7476   inst.instruction |= inst.operands[0].imm;
7477 }
7478 
7479 static void
7480 do_rd_cpaddr (void)
7481 {
7482   inst.instruction |= inst.operands[0].reg << 12;
7483   encode_arm_cp_address (1, TRUE, TRUE, 0);
7484 }
7485 
7486 /* ARM instructions, in alphabetical order by function name (except
7487    that wrapper functions appear immediately after the function they
7488    wrap).  */
7489 
7490 /* This is a pseudo-op of the form "adr rd, label" to be converted
7491    into a relative address of the form "add rd, pc, #label-.-8".  */
7492 
7493 static void
7494 do_adr (void)
7495 {
7496   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7497 
7498   /* Frag hacking will turn this into a sub instruction if the offset turns
7499      out to be negative.  */
7500   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7501   inst.reloc.pc_rel = 1;
7502   inst.reloc.exp.X_add_number -= 8;
7503 }
7504 
7505 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7506    into a relative address of the form:
7507    add rd, pc, #low(label-.-8)"
7508    add rd, rd, #high(label-.-8)"  */
7509 
7510 static void
7511 do_adrl (void)
7512 {
7513   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7514 
7515   /* Frag hacking will turn this into a sub instruction if the offset turns
7516      out to be negative.  */
7517   inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7518   inst.reloc.pc_rel	       = 1;
7519   inst.size		       = INSN_SIZE * 2;
7520   inst.reloc.exp.X_add_number -= 8;
7521 }
7522 
7523 static void
7524 do_arit (void)
7525 {
7526   if (!inst.operands[1].present)
7527     inst.operands[1].reg = inst.operands[0].reg;
7528   inst.instruction |= inst.operands[0].reg << 12;
7529   inst.instruction |= inst.operands[1].reg << 16;
7530   encode_arm_shifter_operand (2);
7531 }
7532 
7533 static void
7534 do_barrier (void)
7535 {
7536   if (inst.operands[0].present)
7537     {
7538       constraint ((inst.instruction & 0xf0) != 0x40
7539 		  && (inst.operands[0].imm > 0xf
7540 		      || inst.operands[0].imm < 0x0),
7541 		  _("bad barrier type"));
7542       inst.instruction |= inst.operands[0].imm;
7543     }
7544   else
7545     inst.instruction |= 0xf;
7546 }
7547 
7548 static void
7549 do_bfc (void)
7550 {
7551   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7552   constraint (msb > 32, _("bit-field extends past end of register"));
7553   /* The instruction encoding stores the LSB and MSB,
7554      not the LSB and width.  */
7555   inst.instruction |= inst.operands[0].reg << 12;
7556   inst.instruction |= inst.operands[1].imm << 7;
7557   inst.instruction |= (msb - 1) << 16;
7558 }
7559 
7560 static void
7561 do_bfi (void)
7562 {
7563   unsigned int msb;
7564 
7565   /* #0 in second position is alternative syntax for bfc, which is
7566      the same instruction but with REG_PC in the Rm field.  */
7567   if (!inst.operands[1].isreg)
7568     inst.operands[1].reg = REG_PC;
7569 
7570   msb = inst.operands[2].imm + inst.operands[3].imm;
7571   constraint (msb > 32, _("bit-field extends past end of register"));
7572   /* The instruction encoding stores the LSB and MSB,
7573      not the LSB and width.  */
7574   inst.instruction |= inst.operands[0].reg << 12;
7575   inst.instruction |= inst.operands[1].reg;
7576   inst.instruction |= inst.operands[2].imm << 7;
7577   inst.instruction |= (msb - 1) << 16;
7578 }
7579 
7580 static void
7581 do_bfx (void)
7582 {
7583   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7584 	      _("bit-field extends past end of register"));
7585   inst.instruction |= inst.operands[0].reg << 12;
7586   inst.instruction |= inst.operands[1].reg;
7587   inst.instruction |= inst.operands[2].imm << 7;
7588   inst.instruction |= (inst.operands[3].imm - 1) << 16;
7589 }
7590 
7591 /* ARM V5 breakpoint instruction (argument parse)
7592      BKPT <16 bit unsigned immediate>
7593      Instruction is not conditional.
7594 	The bit pattern given in insns[] has the COND_ALWAYS condition,
7595 	and it is an error if the caller tried to override that.  */
7596 
7597 static void
7598 do_bkpt (void)
7599 {
7600   /* Top 12 of 16 bits to bits 19:8.  */
7601   inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7602 
7603   /* Bottom 4 of 16 bits to bits 3:0.  */
7604   inst.instruction |= inst.operands[0].imm & 0xf;
7605 }
7606 
7607 static void
7608 encode_branch (int default_reloc)
7609 {
7610   if (inst.operands[0].hasreloc)
7611     {
7612       constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7613 		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7614 		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7615       inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7616 	? BFD_RELOC_ARM_PLT32
7617 	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7618     }
7619   else
7620     inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7621   inst.reloc.pc_rel = 1;
7622 }
7623 
7624 static void
7625 do_branch (void)
7626 {
7627 #ifdef OBJ_ELF
7628   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7629     encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7630   else
7631 #endif
7632     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7633 }
7634 
7635 static void
7636 do_bl (void)
7637 {
7638 #ifdef OBJ_ELF
7639   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7640     {
7641       if (inst.cond == COND_ALWAYS)
7642 	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7643       else
7644 	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7645     }
7646   else
7647 #endif
7648     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7649 }
7650 
7651 /* ARM V5 branch-link-exchange instruction (argument parse)
7652      BLX <target_addr>		ie BLX(1)
7653      BLX{<condition>} <Rm>	ie BLX(2)
7654    Unfortunately, there are two different opcodes for this mnemonic.
7655    So, the insns[].value is not used, and the code here zaps values
7656 	into inst.instruction.
7657    Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
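/* The immediate form lives in the unconditional (cond == 0xF) space,
   which is why inst.instruction is overwritten with 0xfa000000 below
   and why a condition suffix is rejected for it; the register form
   keeps the ordinary conditional encoding from insns[].  */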
7658 
7659 static void
7660 do_blx (void)
7661 {
7662   if (inst.operands[0].isreg)
7663     {
7664       /* Arg is a register; the opcode provided by insns[] is correct.
7665 	 It is not illegal to do "blx pc", just useless.  */
7666       if (inst.operands[0].reg == REG_PC)
7667 	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7668 
7669       inst.instruction |= inst.operands[0].reg;
7670     }
7671   else
7672     {
7673       /* Arg is an address; this instruction cannot be executed
7674 	 conditionally, and the opcode must be adjusted.
7675 	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7676 	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
7677       constraint (inst.cond != COND_ALWAYS, BAD_COND);
7678       inst.instruction = 0xfa000000;
7679       encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7680     }
7681 }
7682 
7683 static void
7684 do_bx (void)
7685 {
7686   bfd_boolean want_reloc;
7687 
7688   if (inst.operands[0].reg == REG_PC)
7689     as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7690 
7691   inst.instruction |= inst.operands[0].reg;
7692   /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
7693      it is for ARMv4t or earlier.  */
7694   want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7695   if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7696       want_reloc = TRUE;
7697 
7698 #ifdef OBJ_ELF
7699   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7700 #endif
7701     want_reloc = FALSE;
7702 
7703   if (want_reloc)
7704     inst.reloc.type = BFD_RELOC_ARM_V4BX;
7705 }
7706 
7707 
7708 /* ARM v5TEJ.  Jump to Jazelle code.  */
7709 
7710 static void
7711 do_bxj (void)
7712 {
7713   if (inst.operands[0].reg == REG_PC)
7714     as_tsktsk (_("use of r15 in bxj is not really useful"));
7715 
7716   inst.instruction |= inst.operands[0].reg;
7717 }
7718 
7719 /* Co-processor data operation:
7720       CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7721       CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
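/* The fields land in the standard CDP positions: coprocessor number in
   bits 8-11, opcode_1 in bits 20-23, CRd in bits 12-15, CRn in bits
   16-19, CRm in bits 0-3 and opcode_2 in bits 5-7.  */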
7722 static void
7723 do_cdp (void)
7724 {
7725   inst.instruction |= inst.operands[0].reg << 8;
7726   inst.instruction |= inst.operands[1].imm << 20;
7727   inst.instruction |= inst.operands[2].reg << 12;
7728   inst.instruction |= inst.operands[3].reg << 16;
7729   inst.instruction |= inst.operands[4].reg;
7730   inst.instruction |= inst.operands[5].imm << 5;
7731 }
7732 
7733 static void
7734 do_cmp (void)
7735 {
7736   inst.instruction |= inst.operands[0].reg << 16;
7737   encode_arm_shifter_operand (1);
7738 }
7739 
7740 /* Transfer between coprocessor and ARM registers.
7741    MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7742    MRC2
7743    MCR{cond}
7744    MCR2
7745 
7746    No special properties.  */
7747 
7748 struct deprecated_coproc_regs_s
7749 {
7750   unsigned cp;
7751   int opc1;
7752   unsigned crn;
7753   unsigned crm;
7754   int opc2;
7755   arm_feature_set deprecated;
7756   arm_feature_set obsoleted;
7757   const char *dep_msg;
7758   const char *obs_msg;
7759 };
7760 
7761 #define DEPR_ACCESS_V8 \
7762   N_("This coprocessor register access is deprecated in ARMv8")
7763 
7764 /* Table of all deprecated coprocessor registers.  */
7765 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
7766 {
7767     {15, 0, 7, 10, 5,					/* CP15DMB.  */
7768      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7769      DEPR_ACCESS_V8, NULL},
7770     {15, 0, 7, 10, 4,					/* CP15DSB.  */
7771      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7772      DEPR_ACCESS_V8, NULL},
7773     {15, 0, 7,  5, 4,					/* CP15ISB.  */
7774      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7775      DEPR_ACCESS_V8, NULL},
7776     {14, 6, 1,  0, 0,					/* TEEHBR.  */
7777      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7778      DEPR_ACCESS_V8, NULL},
7779     {14, 6, 0,  0, 0,					/* TEECR.  */
7780      ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7781      DEPR_ACCESS_V8, NULL},
7782 };
7783 
7784 #undef DEPR_ACCESS_V8
7785 
7786 static const size_t deprecated_coproc_reg_count =
7787   sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
7788 
7789 static void
7790 do_co_reg (void)
7791 {
7792   unsigned Rd;
7793   size_t i;
7794 
7795   Rd = inst.operands[2].reg;
7796   if (thumb_mode)
7797     {
7798       if (inst.instruction == 0xee000010
7799 	  || inst.instruction == 0xfe000010)
7800 	/* MCR, MCR2  */
7801 	reject_bad_reg (Rd);
7802       else
7803 	/* MRC, MRC2  */
7804 	constraint (Rd == REG_SP, BAD_SP);
7805     }
7806   else
7807     {
7808       /* MCR */
7809       if (inst.instruction == 0xe000010)
7810 	constraint (Rd == REG_PC, BAD_PC);
7811     }
7812 
7813     for (i = 0; i < deprecated_coproc_reg_count; ++i)
7814       {
7815 	const struct deprecated_coproc_regs_s *r =
7816 	  deprecated_coproc_regs + i;
7817 
7818 	if (inst.operands[0].reg == r->cp
7819 	    && inst.operands[1].imm == r->opc1
7820 	    && inst.operands[3].reg == r->crn
7821 	    && inst.operands[4].reg == r->crm
7822 	    && inst.operands[5].imm == r->opc2)
7823 	  {
7824 	    if (!check_obsolete (&r->obsoleted, r->obs_msg)
7825 	        && warn_on_deprecated
7826 		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
7827 	      as_warn ("%s", r->dep_msg);
7828 	  }
7829       }
7830 
7831   inst.instruction |= inst.operands[0].reg << 8;
7832   inst.instruction |= inst.operands[1].imm << 21;
7833   inst.instruction |= Rd << 12;
7834   inst.instruction |= inst.operands[3].reg << 16;
7835   inst.instruction |= inst.operands[4].reg;
7836   inst.instruction |= inst.operands[5].imm << 5;
7837 }
7838 
7839 /* Transfer between coprocessor register and pair of ARM registers.
7840    MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7841    MCRR2
7842    MRRC{cond}
7843    MRRC2
7844 
7845    Two XScale instructions are special cases of these:
7846 
7847      MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7848      MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7849 
7850    Result unpredictable if Rd or Rn is R15.  */
7851 
7852 static void
7853 do_co_reg2c (void)
7854 {
7855   unsigned Rd, Rn;
7856 
7857   Rd = inst.operands[2].reg;
7858   Rn = inst.operands[3].reg;
7859 
7860   if (thumb_mode)
7861     {
7862       reject_bad_reg (Rd);
7863       reject_bad_reg (Rn);
7864     }
7865   else
7866     {
7867       constraint (Rd == REG_PC, BAD_PC);
7868       constraint (Rn == REG_PC, BAD_PC);
7869     }
7870 
7871   inst.instruction |= inst.operands[0].reg << 8;
7872   inst.instruction |= inst.operands[1].imm << 4;
7873   inst.instruction |= Rd << 12;
7874   inst.instruction |= Rn << 16;
7875   inst.instruction |= inst.operands[4].reg;
7876 }
7877 
7878 static void
7879 do_cpsi (void)
7880 {
7881   inst.instruction |= inst.operands[0].imm << 6;
7882   if (inst.operands[1].present)
7883     {
7884       inst.instruction |= CPSI_MMOD;
7885       inst.instruction |= inst.operands[1].imm;
7886     }
7887 }
7888 
7889 static void
7890 do_dbg (void)
7891 {
7892   inst.instruction |= inst.operands[0].imm;
7893 }
7894 
7895 static void
7896 do_div (void)
7897 {
7898   unsigned Rd, Rn, Rm;
7899 
7900   Rd = inst.operands[0].reg;
7901   Rn = (inst.operands[1].present
7902 	? inst.operands[1].reg : Rd);
7903   Rm = inst.operands[2].reg;
7904 
7905   constraint ((Rd == REG_PC), BAD_PC);
7906   constraint ((Rn == REG_PC), BAD_PC);
7907   constraint ((Rm == REG_PC), BAD_PC);
7908 
7909   inst.instruction |= Rd << 16;
7910   inst.instruction |= Rn << 0;
7911   inst.instruction |= Rm << 8;
7912 }
7913 
7914 static void
7915 do_it (void)
7916 {
7917   /* There is no IT instruction in ARM mode.  We
7918      process it to do the validation as if in
7919      thumb mode, just in case the code gets
7920      assembled for thumb using the unified syntax.  */
7921 
7922   inst.size = 0;
7923   if (unified_syntax)
7924     {
7925       set_it_insn_type (IT_INSN);
7926       now_it.mask = (inst.instruction & 0xf) | 0x10;
7927       now_it.cc = inst.operands[0].imm;
7928     }
7929 }
7930 
7931 /* If there is only one register in the register list,
7932    then return its register number.  Otherwise return -1.  */
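/* ffs() returns the 1-based index of the lowest set bit, so i is the
   lowest register named in the mask; the "range != (1 << i)" test then
   rejects any list containing more than one register.  */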
7933 static int
7934 only_one_reg_in_list (int range)
7935 {
7936   int i = ffs (range) - 1;
7937   return (i > 15 || range != (1 << i)) ? -1 : i;
7938 }
7939 
7940 static void
7941 encode_ldmstm(int from_push_pop_mnem)
7942 {
7943   int base_reg = inst.operands[0].reg;
7944   int range = inst.operands[1].imm;
7945   int one_reg;
7946 
7947   inst.instruction |= base_reg << 16;
7948   inst.instruction |= range;
7949 
7950   if (inst.operands[1].writeback)
7951     inst.instruction |= LDM_TYPE_2_OR_3;
7952 
7953   if (inst.operands[0].writeback)
7954     {
7955       inst.instruction |= WRITE_BACK;
7956       /* Check for unpredictable uses of writeback.  */
7957       if (inst.instruction & LOAD_BIT)
7958 	{
7959 	  /* Not allowed in LDM type 2.	 */
7960 	  if ((inst.instruction & LDM_TYPE_2_OR_3)
7961 	      && ((range & (1 << REG_PC)) == 0))
7962 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
7963 	  /* Only allowed if base reg not in list for other types.  */
7964 	  else if (range & (1 << base_reg))
7965 	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7966 	}
7967       else /* STM.  */
7968 	{
7969 	  /* Not allowed for type 2.  */
7970 	  if (inst.instruction & LDM_TYPE_2_OR_3)
7971 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
7972 	  /* Only allowed if base reg not in list, or first in list.  */
7973 	  else if ((range & (1 << base_reg))
7974 		   && (range & ((1 << base_reg) - 1)))
7975 	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7976 	}
7977     }
7978 
7979   /* If PUSH/POP has only one register, then use the A2 encoding.  */
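  /* (The A2 forms are the single-register equivalents: e.g. "push {r3}"
     assembles as "str r3, [sp, #-4]!" and "pop {r3}" as "ldr r3, [sp], #4".)  */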
7980   one_reg = only_one_reg_in_list (range);
7981   if (from_push_pop_mnem && one_reg >= 0)
7982     {
7983       int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
7984 
7985       inst.instruction &= A_COND_MASK;
7986       inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
7987       inst.instruction |= one_reg << 12;
7988     }
7989 }
7990 
7991 static void
7992 do_ldmstm (void)
7993 {
7994   encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
7995 }
7996 
7997 /* ARMv5TE load-consecutive (argument parse)
7998    Mode is like LDRH.
7999 
8000      LDRccD R, mode
8001      STRccD R, mode.  */
8002 
8003 static void
8004 do_ldrd (void)
8005 {
8006   constraint (inst.operands[0].reg % 2 != 0,
8007 	      _("first transfer register must be even"));
8008   constraint (inst.operands[1].present
8009 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8010 	      _("can only transfer two consecutive registers"));
8011   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8012   constraint (!inst.operands[2].isreg, _("'[' expected"));
8013 
8014   if (!inst.operands[1].present)
8015     inst.operands[1].reg = inst.operands[0].reg + 1;
8016 
8017   /* encode_arm_addr_mode_3 will diagnose overlap between the base
8018      register and the first register written; we have to diagnose
8019      overlap between the base and the second register written here.  */
8020 
8021   if (inst.operands[2].reg == inst.operands[1].reg
8022       && (inst.operands[2].writeback || inst.operands[2].postind))
8023     as_warn (_("base register written back, and overlaps "
8024 	       "second transfer register"));
8025 
8026   if (!(inst.instruction & V4_STR_BIT))
8027     {
8028       /* For an index-register load, the index register must not overlap the
8029 	destination (even if not write-back).  */
8030       if (inst.operands[2].immisreg
8031 	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8032 	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8033 	as_warn (_("index register overlaps transfer register"));
8034     }
8035   inst.instruction |= inst.operands[0].reg << 12;
8036   encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8037 }
8038 
8039 static void
8040 do_ldrex (void)
8041 {
8042   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8043 	      || inst.operands[1].postind || inst.operands[1].writeback
8044 	      || inst.operands[1].immisreg || inst.operands[1].shifted
8045 	      || inst.operands[1].negative
8046 	      /* This can arise if the programmer has written
8047 		   strex rN, rM, foo
8048 		 or if they have mistakenly used a register name as the last
8049 		 operand, e.g.:
8050 		   strex rN, rM, rX
8051 		 It is very difficult to distinguish between these two cases
8052 		 because "rX" might actually be a label, i.e. the register
8053 		 name has been occluded by a symbol of the same name. So we
8054 		 just generate a general 'bad addressing mode' type error
8055 		 message and leave it up to the programmer to discover the
8056 		 true cause and fix their mistake.  */
8057 	      || (inst.operands[1].reg == REG_PC),
8058 	      BAD_ADDR_MODE);
8059 
8060   constraint (inst.reloc.exp.X_op != O_constant
8061 	      || inst.reloc.exp.X_add_number != 0,
8062 	      _("offset must be zero in ARM encoding"));
8063 
8064   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8065 
8066   inst.instruction |= inst.operands[0].reg << 12;
8067   inst.instruction |= inst.operands[1].reg << 16;
8068   inst.reloc.type = BFD_RELOC_UNUSED;
8069 }
8070 
8071 static void
8072 do_ldrexd (void)
8073 {
8074   constraint (inst.operands[0].reg % 2 != 0,
8075 	      _("even register required"));
8076   constraint (inst.operands[1].present
8077 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8078 	      _("can only load two consecutive registers"));
8079   /* If op 1 were present and equal to PC, this function wouldn't
8080      have been called in the first place.  */
8081   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8082 
8083   inst.instruction |= inst.operands[0].reg << 12;
8084   inst.instruction |= inst.operands[2].reg << 16;
8085 }
8086 
8087 /* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
8088    which is not a multiple of four is UNPREDICTABLE.  */
8089 static void
8090 check_ldr_r15_aligned (void)
8091 {
8092   constraint (!(inst.operands[1].immisreg)
8093 	      && (inst.operands[0].reg == REG_PC
8094 	      && inst.operands[1].reg == REG_PC
8095 	      && (inst.reloc.exp.X_add_number & 0x3)),
8096 	      _("ldr to register 15 must be 4-byte alligned"));
8097 	      _("ldr to register 15 must be 4-byte aligned"));
8098 
8099 static void
8100 do_ldst (void)
8101 {
8102   inst.instruction |= inst.operands[0].reg << 12;
8103   if (!inst.operands[1].isreg)
8104     if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
8105       return;
8106   encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8107   check_ldr_r15_aligned ();
8108 }
8109 
8110 static void
8111 do_ldstt (void)
8112 {
8113   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8114      reject [Rn,...].  */
8115   if (inst.operands[1].preind)
8116     {
8117       constraint (inst.reloc.exp.X_op != O_constant
8118 		  || inst.reloc.exp.X_add_number != 0,
8119 		  _("this instruction requires a post-indexed address"));
8120 
8121       inst.operands[1].preind = 0;
8122       inst.operands[1].postind = 1;
8123       inst.operands[1].writeback = 1;
8124     }
8125   inst.instruction |= inst.operands[0].reg << 12;
8126   encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8127 }
8128 
8129 /* Halfword and signed-byte load/store operations.  */
8130 
8131 static void
8132 do_ldstv4 (void)
8133 {
8134   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8135   inst.instruction |= inst.operands[0].reg << 12;
8136   if (!inst.operands[1].isreg)
8137     if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
8138       return;
8139   encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8140 }
8141 
8142 static void
8143 do_ldsttv4 (void)
8144 {
8145   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8146      reject [Rn,...].  */
8147   if (inst.operands[1].preind)
8148     {
8149       constraint (inst.reloc.exp.X_op != O_constant
8150 		  || inst.reloc.exp.X_add_number != 0,
8151 		  _("this instruction requires a post-indexed address"));
8152 
8153       inst.operands[1].preind = 0;
8154       inst.operands[1].postind = 1;
8155       inst.operands[1].writeback = 1;
8156     }
8157   inst.instruction |= inst.operands[0].reg << 12;
8158   encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8159 }
8160 
8161 /* Co-processor register load/store.
8162    Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
8163 static void
8164 do_lstc (void)
8165 {
8166   inst.instruction |= inst.operands[0].reg << 8;
8167   inst.instruction |= inst.operands[1].reg << 12;
8168   encode_arm_cp_address (2, TRUE, TRUE, 0);
8169 }
8170 
8171 static void
8172 do_mlas (void)
8173 {
8174   /* This restriction does not apply to mls (nor to mla in v6 or later).  */
8175   if (inst.operands[0].reg == inst.operands[1].reg
8176       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8177       && !(inst.instruction & 0x00400000))
8178     as_tsktsk (_("Rd and Rm should be different in mla"));
8179 
8180   inst.instruction |= inst.operands[0].reg << 16;
8181   inst.instruction |= inst.operands[1].reg;
8182   inst.instruction |= inst.operands[2].reg << 8;
8183   inst.instruction |= inst.operands[3].reg << 12;
8184 }
8185 
8186 static void
8187 do_mov (void)
8188 {
8189   inst.instruction |= inst.operands[0].reg << 12;
8190   encode_arm_shifter_operand (1);
8191 }
8192 
8193 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
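/* The 16-bit immediate is stored as imm4:imm12, i.e. the top four bits
   go to instruction bits 16-19 and the low twelve to bits 0-11, which
   is what the masking below reproduces.  */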
8194 static void
8195 do_mov16 (void)
8196 {
8197   bfd_vma imm;
8198   bfd_boolean top;
8199 
8200   top = (inst.instruction & 0x00400000) != 0;
8201   constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8202 	      _(":lower16: not allowed in this instruction"));
8203   constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8204 	      _(":upper16: not allowed in this instruction"));
8205   inst.instruction |= inst.operands[0].reg << 12;
8206   if (inst.reloc.type == BFD_RELOC_UNUSED)
8207     {
8208       imm = inst.reloc.exp.X_add_number;
8209       /* The value is in two pieces: 0:11, 16:19.  */
8210       inst.instruction |= (imm & 0x00000fff);
8211       inst.instruction |= (imm & 0x0000f000) << 4;
8212     }
8213 }
8214 
8215 static void do_vfp_nsyn_opcode (const char *);
8216 
8217 static int
8218 do_vfp_nsyn_mrs (void)
8219 {
8220   if (inst.operands[0].isvec)
8221     {
8222       if (inst.operands[1].reg != 1)
8223         first_error (_("operand 1 must be FPSCR"));
8224       memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8225       memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8226       do_vfp_nsyn_opcode ("fmstat");
8227     }
8228   else if (inst.operands[1].isvec)
8229     do_vfp_nsyn_opcode ("fmrx");
8230   else
8231     return FAIL;
8232 
8233   return SUCCESS;
8234 }
8235 
8236 static int
8237 do_vfp_nsyn_msr (void)
8238 {
8239   if (inst.operands[0].isvec)
8240     do_vfp_nsyn_opcode ("fmxr");
8241   else
8242     return FAIL;
8243 
8244   return SUCCESS;
8245 }
8246 
8247 static void
8248 do_vmrs (void)
8249 {
8250   unsigned Rt = inst.operands[0].reg;
8251 
8252   if (thumb_mode && inst.operands[0].reg == REG_SP)
8253     {
8254       inst.error = BAD_SP;
8255       return;
8256     }
8257 
8258   /* APSR_ sets isvec. All other refs to PC are illegal.  */
8259   if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8260     {
8261       inst.error = BAD_PC;
8262       return;
8263     }
8264 
8265   switch (inst.operands[1].reg)
8266     {
8267     case 0: /* FPSID */
8268     case 1: /* FPSCR */
8269     case 6: /* MVFR1 */
8270     case 7: /* MVFR0 */
8271     case 8: /* FPEXC */
8272       inst.instruction |= (inst.operands[1].reg << 16);
8273       break;
8274     default:
8275       first_error (_("operand 1 must be a VFP extension System Register"));
8276     }
8277 
8278   inst.instruction |= (Rt << 12);
8279 }
8280 
8281 static void
8282 do_vmsr (void)
8283 {
8284   unsigned Rt = inst.operands[1].reg;
8285 
8286   if (thumb_mode)
8287     reject_bad_reg (Rt);
8288   else if (Rt == REG_PC)
8289     {
8290       inst.error = BAD_PC;
8291       return;
8292     }
8293 
8294   switch (inst.operands[0].reg)
8295     {
8296     case 0: /* FPSID  */
8297     case 1: /* FPSCR  */
8298     case 8: /* FPEXC */
8299       inst.instruction |= (inst.operands[0].reg << 16);
8300       break;
8301     default:
8302       first_error (_("operand 0 must be FPSID, FPSCR or FPEXC"));
8303     }
8304 
8305   inst.instruction |= (Rt << 12);
8306 }
8307 
8308 static void
8309 do_mrs (void)
8310 {
8311   unsigned br;
8312 
8313   if (do_vfp_nsyn_mrs () == SUCCESS)
8314     return;
8315 
8316   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8317   inst.instruction |= inst.operands[0].reg << 12;
8318 
8319   if (inst.operands[1].isreg)
8320     {
8321       br = inst.operands[1].reg;
8322       if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8323 	as_bad (_("bad register for mrs"));
8324     }
8325   else
8326     {
8327       /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
8328       constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8329 		  != (PSR_c|PSR_f),
8330 		  _("'APSR', 'CPSR' or 'SPSR' expected"));
8331       br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8332     }
8333 
8334   inst.instruction |= br;
8335 }
8336 
8337 /* Two possible forms:
8338       "{C|S}PSR_<field>, Rm",
8339       "{C|S}PSR_f, #expression".  */
8340 
8341 static void
8342 do_msr (void)
8343 {
8344   if (do_vfp_nsyn_msr () == SUCCESS)
8345     return;
8346 
8347   inst.instruction |= inst.operands[0].imm;
8348   if (inst.operands[1].isreg)
8349     inst.instruction |= inst.operands[1].reg;
8350   else
8351     {
8352       inst.instruction |= INST_IMMEDIATE;
8353       inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8354       inst.reloc.pc_rel = 0;
8355     }
8356 }
8357 
8358 static void
8359 do_mul (void)
8360 {
8361   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8362 
8363   if (!inst.operands[2].present)
8364     inst.operands[2].reg = inst.operands[0].reg;
8365   inst.instruction |= inst.operands[0].reg << 16;
8366   inst.instruction |= inst.operands[1].reg;
8367   inst.instruction |= inst.operands[2].reg << 8;
8368 
8369   if (inst.operands[0].reg == inst.operands[1].reg
8370       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8371     as_tsktsk (_("Rd and Rm should be different in mul"));
8372 }
8373 
8374 /* Long Multiply Parser
8375    UMULL RdLo, RdHi, Rm, Rs
8376    SMULL RdLo, RdHi, Rm, Rs
8377    UMLAL RdLo, RdHi, Rm, Rs
8378    SMLAL RdLo, RdHi, Rm, Rs.  */
8379 
8380 static void
8381 do_mull (void)
8382 {
8383   inst.instruction |= inst.operands[0].reg << 12;
8384   inst.instruction |= inst.operands[1].reg << 16;
8385   inst.instruction |= inst.operands[2].reg;
8386   inst.instruction |= inst.operands[3].reg << 8;
8387 
8388   /* rdhi and rdlo must be different.  */
8389   if (inst.operands[0].reg == inst.operands[1].reg)
8390     as_tsktsk (_("rdhi and rdlo must be different"));
8391 
8392   /* rdhi, rdlo and rm must all be different before armv6.  */
8393   if ((inst.operands[0].reg == inst.operands[2].reg
8394       || inst.operands[1].reg == inst.operands[2].reg)
8395       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8396     as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8397 }
8398 
8399 static void
8400 do_nop (void)
8401 {
8402   if (inst.operands[0].present
8403       || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8404     {
8405       /* Architectural NOP hints are CPSR sets with no bits selected.  */
8406       inst.instruction &= 0xf0000000;
8407       inst.instruction |= 0x0320f000;
8408       if (inst.operands[0].present)
8409 	inst.instruction |= inst.operands[0].imm;
8410     }
8411 }
8412 
8413 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8414    PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8415    Condition defaults to COND_ALWAYS.
8416    Error if Rd, Rn or Rm are R15.  */
8417 
8418 static void
8419 do_pkhbt (void)
8420 {
8421   inst.instruction |= inst.operands[0].reg << 12;
8422   inst.instruction |= inst.operands[1].reg << 16;
8423   inst.instruction |= inst.operands[2].reg;
8424   if (inst.operands[3].present)
8425     encode_arm_shift (3);
8426 }
8427 
8428 /* ARM V6 PKHTB (Argument Parse).  */
8429 
8430 static void
8431 do_pkhtb (void)
8432 {
8433   if (!inst.operands[3].present)
8434     {
8435       /* If the shift specifier is omitted, turn the instruction
8436 	 into pkhbt rd, rm, rn. */
8437       inst.instruction &= 0xfff00010;
8438       inst.instruction |= inst.operands[0].reg << 12;
8439       inst.instruction |= inst.operands[1].reg;
8440       inst.instruction |= inst.operands[2].reg << 16;
8441     }
8442   else
8443     {
8444       inst.instruction |= inst.operands[0].reg << 12;
8445       inst.instruction |= inst.operands[1].reg << 16;
8446       inst.instruction |= inst.operands[2].reg;
8447       encode_arm_shift (3);
8448     }
8449 }
8450 
8451 /* ARMv5TE: Preload-Cache
8452    MP Extensions: Preload for write
8453 
8454     PLD(W) <addr_mode>
8455 
8456   Syntactically, like LDR with B=1, W=0, L=1.  */
8457 
8458 static void
8459 do_pld (void)
8460 {
8461   constraint (!inst.operands[0].isreg,
8462 	      _("'[' expected after PLD mnemonic"));
8463   constraint (inst.operands[0].postind,
8464 	      _("post-indexed expression used in preload instruction"));
8465   constraint (inst.operands[0].writeback,
8466 	      _("writeback used in preload instruction"));
8467   constraint (!inst.operands[0].preind,
8468 	      _("unindexed addressing used in preload instruction"));
8469   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8470 }
8471 
8472 /* ARMv7: PLI <addr_mode>  */
8473 static void
8474 do_pli (void)
8475 {
8476   constraint (!inst.operands[0].isreg,
8477 	      _("'[' expected after PLI mnemonic"));
8478   constraint (inst.operands[0].postind,
8479 	      _("post-indexed expression used in preload instruction"));
8480   constraint (inst.operands[0].writeback,
8481 	      _("writeback used in preload instruction"));
8482   constraint (!inst.operands[0].preind,
8483 	      _("unindexed addressing used in preload instruction"));
8484   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8485   inst.instruction &= ~PRE_INDEX;
8486 }
8487 
8488 static void
8489 do_push_pop (void)
8490 {
8491   inst.operands[1] = inst.operands[0];
8492   memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8493   inst.operands[0].isreg = 1;
8494   inst.operands[0].writeback = 1;
8495   inst.operands[0].reg = REG_SP;
8496   encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
8497 }
8498 
8499 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8500    word at the specified address and the following word
8501    respectively.
8502    Unconditionally executed.
8503    Error if Rn is R15.	*/
8504 
8505 static void
8506 do_rfe (void)
8507 {
8508   inst.instruction |= inst.operands[0].reg << 16;
8509   if (inst.operands[0].writeback)
8510     inst.instruction |= WRITE_BACK;
8511 }
8512 
8513 /* ARM V6 ssat (argument parse).  */
8514 
8515 static void
8516 do_ssat (void)
8517 {
8518   inst.instruction |= inst.operands[0].reg << 12;
8519   inst.instruction |= (inst.operands[1].imm - 1) << 16;
8520   inst.instruction |= inst.operands[2].reg;
8521 
8522   if (inst.operands[3].present)
8523     encode_arm_shift (3);
8524 }
8525 
8526 /* ARM V6 usat (argument parse).  */
8527 
8528 static void
8529 do_usat (void)
8530 {
8531   inst.instruction |= inst.operands[0].reg << 12;
8532   inst.instruction |= inst.operands[1].imm << 16;
8533   inst.instruction |= inst.operands[2].reg;
8534 
8535   if (inst.operands[3].present)
8536     encode_arm_shift (3);
8537 }
8538 
8539 /* ARM V6 ssat16 (argument parse).  */
8540 
8541 static void
8542 do_ssat16 (void)
8543 {
8544   inst.instruction |= inst.operands[0].reg << 12;
8545   inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8546   inst.instruction |= inst.operands[2].reg;
8547 }
8548 
8549 static void
8550 do_usat16 (void)
8551 {
8552   inst.instruction |= inst.operands[0].reg << 12;
8553   inst.instruction |= inst.operands[1].imm << 16;
8554   inst.instruction |= inst.operands[2].reg;
8555 }
8556 
8557 /* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
8558    preserving the other bits.
8559 
8560    setend <endian_specifier>, where <endian_specifier> is either
8561    BE or LE.  */
8562 
8563 static void
8564 do_setend (void)
8565 {
8566   if (warn_on_deprecated
8567       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8568     as_warn (_("setend use is deprecated for ARMv8"));
8569 
8570   if (inst.operands[0].imm)
8571     inst.instruction |= 0x200;
8572 }
8573 
8574 static void
8575 do_shift (void)
8576 {
8577   unsigned int Rm = (inst.operands[1].present
8578 		     ? inst.operands[1].reg
8579 		     : inst.operands[0].reg);
8580 
8581   inst.instruction |= inst.operands[0].reg << 12;
8582   inst.instruction |= Rm;
8583   if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
8584     {
8585       inst.instruction |= inst.operands[2].reg << 8;
8586       inst.instruction |= SHIFT_BY_REG;
8587       /* PR 12854: Error on extraneous shifts.  */
8588       constraint (inst.operands[2].shifted,
8589 		  _("extraneous shift as part of operand to shift insn"));
8590     }
8591   else
8592     inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8593 }
8594 
8595 static void
8596 do_smc (void)
8597 {
8598   inst.reloc.type = BFD_RELOC_ARM_SMC;
8599   inst.reloc.pc_rel = 0;
8600 }
8601 
8602 static void
8603 do_hvc (void)
8604 {
8605   inst.reloc.type = BFD_RELOC_ARM_HVC;
8606   inst.reloc.pc_rel = 0;
8607 }
8608 
8609 static void
8610 do_swi (void)
8611 {
8612   inst.reloc.type = BFD_RELOC_ARM_SWI;
8613   inst.reloc.pc_rel = 0;
8614 }
8615 
8616 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8617    SMLAxy{cond} Rd,Rm,Rs,Rn
8618    SMLAWy{cond} Rd,Rm,Rs,Rn
8619    Error if any register is R15.  */
8620 
8621 static void
8622 do_smla (void)
8623 {
8624   inst.instruction |= inst.operands[0].reg << 16;
8625   inst.instruction |= inst.operands[1].reg;
8626   inst.instruction |= inst.operands[2].reg << 8;
8627   inst.instruction |= inst.operands[3].reg << 12;
8628 }
8629 
8630 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8631    SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8632    Error if any register is R15.
8633    Warning if Rdlo == Rdhi.  */
8634 
8635 static void
8636 do_smlal (void)
8637 {
8638   inst.instruction |= inst.operands[0].reg << 12;
8639   inst.instruction |= inst.operands[1].reg << 16;
8640   inst.instruction |= inst.operands[2].reg;
8641   inst.instruction |= inst.operands[3].reg << 8;
8642 
8643   if (inst.operands[0].reg == inst.operands[1].reg)
8644     as_tsktsk (_("rdhi and rdlo must be different"));
8645 }
8646 
8647 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8648    SMULxy{cond} Rd,Rm,Rs
8649    Error if any register is R15.  */
8650 
8651 static void
8652 do_smul (void)
8653 {
8654   inst.instruction |= inst.operands[0].reg << 16;
8655   inst.instruction |= inst.operands[1].reg;
8656   inst.instruction |= inst.operands[2].reg << 8;
8657 }
8658 
8659 /* ARM V6 srs (argument parse).  The variable fields in the encoding are
8660    the same for both ARM and Thumb-2.  */
8661 
8662 static void
8663 do_srs (void)
8664 {
8665   int reg;
8666 
8667   if (inst.operands[0].present)
8668     {
8669       reg = inst.operands[0].reg;
8670       constraint (reg != REG_SP, _("SRS base register must be r13"));
8671     }
8672   else
8673     reg = REG_SP;
8674 
8675   inst.instruction |= reg << 16;
8676   inst.instruction |= inst.operands[1].imm;
8677   if (inst.operands[0].writeback || inst.operands[1].writeback)
8678     inst.instruction |= WRITE_BACK;
8679 }
8680 
8681 /* ARM V6 strex (argument parse).  */
8682 
8683 static void
8684 do_strex (void)
8685 {
8686   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8687 	      || inst.operands[2].postind || inst.operands[2].writeback
8688 	      || inst.operands[2].immisreg || inst.operands[2].shifted
8689 	      || inst.operands[2].negative
8690 	      /* See comment in do_ldrex().  */
8691 	      || (inst.operands[2].reg == REG_PC),
8692 	      BAD_ADDR_MODE);
8693 
8694   constraint (inst.operands[0].reg == inst.operands[1].reg
8695 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8696 
8697   constraint (inst.reloc.exp.X_op != O_constant
8698 	      || inst.reloc.exp.X_add_number != 0,
8699 	      _("offset must be zero in ARM encoding"));
8700 
8701   inst.instruction |= inst.operands[0].reg << 12;
8702   inst.instruction |= inst.operands[1].reg;
8703   inst.instruction |= inst.operands[2].reg << 16;
8704   inst.reloc.type = BFD_RELOC_UNUSED;
8705 }
8706 
8707 static void
8708 do_t_strexbh (void)
8709 {
8710   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8711 	      || inst.operands[2].postind || inst.operands[2].writeback
8712 	      || inst.operands[2].immisreg || inst.operands[2].shifted
8713 	      || inst.operands[2].negative,
8714 	      BAD_ADDR_MODE);
8715 
8716   constraint (inst.operands[0].reg == inst.operands[1].reg
8717 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8718 
8719   do_rm_rd_rn ();
8720 }
8721 
8722 static void
8723 do_strexd (void)
8724 {
8725   constraint (inst.operands[1].reg % 2 != 0,
8726 	      _("even register required"));
8727   constraint (inst.operands[2].present
8728 	      && inst.operands[2].reg != inst.operands[1].reg + 1,
8729 	      _("can only store two consecutive registers"));
8730   /* If op 2 were present and equal to PC, this function wouldn't
8731      have been called in the first place.  */
8732   constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8733 
8734   constraint (inst.operands[0].reg == inst.operands[1].reg
8735 	      || inst.operands[0].reg == inst.operands[1].reg + 1
8736 	      || inst.operands[0].reg == inst.operands[3].reg,
8737 	      BAD_OVERLAP);
8738 
8739   inst.instruction |= inst.operands[0].reg << 12;
8740   inst.instruction |= inst.operands[1].reg;
8741   inst.instruction |= inst.operands[3].reg << 16;
8742 }
8743 
8744 /* ARM V8 STLEX.  */
8745 static void
8746 do_stlex (void)
8747 {
8748   constraint (inst.operands[0].reg == inst.operands[1].reg
8749 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8750 
8751   do_rd_rm_rn ();
8752 }
8753 
8754 static void
8755 do_t_stlex (void)
8756 {
8757   constraint (inst.operands[0].reg == inst.operands[1].reg
8758 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8759 
8760   do_rm_rd_rn ();
8761 }
8762 
8763 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8764    extends it to 32-bits, and adds the result to a value in another
8765    register.  You can specify a rotation by 0, 8, 16, or 24 bits
8766    before extracting the 16-bit value.
8767    SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8768    Condition defaults to COND_ALWAYS.
8769    Error if any register uses R15.  */
8770 
8771 static void
8772 do_sxtah (void)
8773 {
8774   inst.instruction |= inst.operands[0].reg << 12;
8775   inst.instruction |= inst.operands[1].reg << 16;
8776   inst.instruction |= inst.operands[2].reg;
8777   inst.instruction |= inst.operands[3].imm << 10;
8778 }
8779 
8780 /* ARM V6 SXTH.
8781 
8782    SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8783    Condition defaults to COND_ALWAYS.
8784    Error if any register uses R15.  */
8785 
8786 static void
8787 do_sxth (void)
8788 {
8789   inst.instruction |= inst.operands[0].reg << 12;
8790   inst.instruction |= inst.operands[1].reg;
8791   inst.instruction |= inst.operands[2].imm << 10;
8792 }
8793 
8794 /* VFP instructions.  In a logical order: SP variant first, monad
8795    before dyad, arithmetic then move then load/store.  */
8796 
8797 static void
8798 do_vfp_sp_monadic (void)
8799 {
8800   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8801   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8802 }
8803 
8804 static void
8805 do_vfp_sp_dyadic (void)
8806 {
8807   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8808   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8809   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8810 }
8811 
8812 static void
8813 do_vfp_sp_compare_z (void)
8814 {
8815   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8816 }
8817 
8818 static void
8819 do_vfp_dp_sp_cvt (void)
8820 {
8821   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8822   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8823 }
8824 
8825 static void
8826 do_vfp_sp_dp_cvt (void)
8827 {
8828   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8829   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8830 }
8831 
8832 static void
8833 do_vfp_reg_from_sp (void)
8834 {
8835   inst.instruction |= inst.operands[0].reg << 12;
8836   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8837 }
8838 
8839 static void
8840 do_vfp_reg2_from_sp2 (void)
8841 {
8842   constraint (inst.operands[2].imm != 2,
8843 	      _("only two consecutive VFP SP registers allowed here"));
8844   inst.instruction |= inst.operands[0].reg << 12;
8845   inst.instruction |= inst.operands[1].reg << 16;
8846   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8847 }
8848 
8849 static void
8850 do_vfp_sp_from_reg (void)
8851 {
8852   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8853   inst.instruction |= inst.operands[1].reg << 12;
8854 }
8855 
8856 static void
8857 do_vfp_sp2_from_reg2 (void)
8858 {
8859   constraint (inst.operands[0].imm != 2,
8860 	      _("only two consecutive VFP SP registers allowed here"));
8861   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8862   inst.instruction |= inst.operands[1].reg << 12;
8863   inst.instruction |= inst.operands[2].reg << 16;
8864 }
8865 
8866 static void
8867 do_vfp_sp_ldst (void)
8868 {
8869   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8870   encode_arm_cp_address (1, FALSE, TRUE, 0);
8871 }
8872 
8873 static void
8874 do_vfp_dp_ldst (void)
8875 {
8876   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8877   encode_arm_cp_address (1, FALSE, TRUE, 0);
8878 }
8879 
8880 
8881 static void
8882 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8883 {
8884   if (inst.operands[0].writeback)
8885     inst.instruction |= WRITE_BACK;
8886   else
8887     constraint (ldstm_type != VFP_LDSTMIA,
8888 		_("this addressing mode requires base-register writeback"));
8889   inst.instruction |= inst.operands[0].reg << 16;
8890   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8891   inst.instruction |= inst.operands[1].imm;
8892 }
8893 
8894 static void
8895 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8896 {
8897   int count;
8898 
8899   if (inst.operands[0].writeback)
8900     inst.instruction |= WRITE_BACK;
8901   else
8902     constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8903 		_("this addressing mode requires base-register writeback"));
8904 
8905   inst.instruction |= inst.operands[0].reg << 16;
8906   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8907 
8908   count = inst.operands[1].imm << 1;
8909   if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8910     count += 1;
8911 
8912   inst.instruction |= count;
8913 }
8914 
8915 static void
8916 do_vfp_sp_ldstmia (void)
8917 {
8918   vfp_sp_ldstm (VFP_LDSTMIA);
8919 }
8920 
8921 static void
8922 do_vfp_sp_ldstmdb (void)
8923 {
8924   vfp_sp_ldstm (VFP_LDSTMDB);
8925 }
8926 
8927 static void
8928 do_vfp_dp_ldstmia (void)
8929 {
8930   vfp_dp_ldstm (VFP_LDSTMIA);
8931 }
8932 
8933 static void
8934 do_vfp_dp_ldstmdb (void)
8935 {
8936   vfp_dp_ldstm (VFP_LDSTMDB);
8937 }
8938 
8939 static void
8940 do_vfp_xp_ldstmia (void)
8941 {
8942   vfp_dp_ldstm (VFP_LDSTMIAX);
8943 }
8944 
8945 static void
8946 do_vfp_xp_ldstmdb (void)
8947 {
8948   vfp_dp_ldstm (VFP_LDSTMDBX);
8949 }
8950 
8951 static void
8952 do_vfp_dp_rd_rm (void)
8953 {
8954   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8955   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8956 }
8957 
8958 static void
8959 do_vfp_dp_rn_rd (void)
8960 {
8961   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8962   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8963 }
8964 
8965 static void
8966 do_vfp_dp_rd_rn (void)
8967 {
8968   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8969   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8970 }
8971 
8972 static void
8973 do_vfp_dp_rd_rn_rm (void)
8974 {
8975   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8976   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8977   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8978 }
8979 
8980 static void
8981 do_vfp_dp_rd (void)
8982 {
8983   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8984 }
8985 
8986 static void
8987 do_vfp_dp_rm_rd_rn (void)
8988 {
8989   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8990   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8991   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8992 }
8993 
8994 /* VFPv3 instructions.  */
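/* VMOV (immediate) splits its 8-bit encoded immediate into a high
   nibble in instruction bits 16-19 and a low nibble in bits 0-3.  */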
8995 static void
8996 do_vfp_sp_const (void)
8997 {
8998   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8999   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9000   inst.instruction |= (inst.operands[1].imm & 0x0f);
9001 }
9002 
9003 static void
9004 do_vfp_dp_const (void)
9005 {
9006   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9007   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9008   inst.instruction |= (inst.operands[1].imm & 0x0f);
9009 }
9010 
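/* For the VCVT fixed-point conversions the number of fraction bits is
   encoded as (size - fbits): the low bit of that value goes to
   instruction bit 5 and the remaining bits to bits 0-3.  srcsize is
   the fixed-point operand size and inst.operands[1].imm the requested
   number of fraction bits.  */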
9011 static void
9012 vfp_conv (int srcsize)
9013 {
9014   int immbits = srcsize - inst.operands[1].imm;
9015 
9016   if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9017     {
9018       /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9019          i.e. immbits must be in range 0 - 16.  */
9020       inst.error = _("immediate value out of range, expected range [0, 16]");
9021       return;
9022     }
9023   else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9024     {
9025       /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9026          i.e. immbits must be in range 0 - 31.  */
9027       inst.error = _("immediate value out of range, expected range [1, 32]");
9028       return;
9029     }
9030 
9031   inst.instruction |= (immbits & 1) << 5;
9032   inst.instruction |= (immbits >> 1);
9033 }
9034 
9035 static void
9036 do_vfp_sp_conv_16 (void)
9037 {
9038   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9039   vfp_conv (16);
9040 }
9041 
9042 static void
9043 do_vfp_dp_conv_16 (void)
9044 {
9045   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9046   vfp_conv (16);
9047 }
9048 
9049 static void
9050 do_vfp_sp_conv_32 (void)
9051 {
9052   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9053   vfp_conv (32);
9054 }
9055 
9056 static void
9057 do_vfp_dp_conv_32 (void)
9058 {
9059   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9060   vfp_conv (32);
9061 }
9062 
9063 /* FPA instructions.  Also in a logical order.	*/
9064 
9065 static void
9066 do_fpa_cmp (void)
9067 {
9068   inst.instruction |= inst.operands[0].reg << 16;
9069   inst.instruction |= inst.operands[1].reg;
9070 }
9071 
9072 static void
9073 do_fpa_ldmstm (void)
9074 {
9075   inst.instruction |= inst.operands[0].reg << 12;
9076   switch (inst.operands[1].imm)
9077     {
9078     case 1: inst.instruction |= CP_T_X;		 break;
9079     case 2: inst.instruction |= CP_T_Y;		 break;
9080     case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9081     case 4:					 break;
9082     default: abort ();
9083     }
9084 
9085   if (inst.instruction & (PRE_INDEX | INDEX_UP))
9086     {
9087       /* The instruction specified "ea" or "fd", so we can only accept
9088 	 [Rn]{!}.  The instruction does not really support stacking or
9089 	 unstacking, so we have to emulate these by setting appropriate
9090 	 bits and offsets.  */
9091       constraint (inst.reloc.exp.X_op != O_constant
9092 		  || inst.reloc.exp.X_add_number != 0,
9093 		  _("this instruction does not support indexing"));
9094 
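      /* FPA sfm/lfm transfer each register as three words (12 bytes),
	 hence the offset of 12 per register below.  */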
9095       if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9096 	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9097 
9098       if (!(inst.instruction & INDEX_UP))
9099 	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9100 
9101       if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9102 	{
9103 	  inst.operands[2].preind = 0;
9104 	  inst.operands[2].postind = 1;
9105 	}
9106     }
9107 
9108   encode_arm_cp_address (2, TRUE, TRUE, 0);
9109 }
9110 
9111 /* iWMMXt instructions: strictly in alphabetical order.	 */
9112 
9113 static void
9114 do_iwmmxt_tandorc (void)
9115 {
9116   constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9117 }
9118 
9119 static void
9120 do_iwmmxt_textrc (void)
9121 {
9122   inst.instruction |= inst.operands[0].reg << 12;
9123   inst.instruction |= inst.operands[1].imm;
9124 }
9125 
9126 static void
9127 do_iwmmxt_textrm (void)
9128 {
9129   inst.instruction |= inst.operands[0].reg << 12;
9130   inst.instruction |= inst.operands[1].reg << 16;
9131   inst.instruction |= inst.operands[2].imm;
9132 }
9133 
9134 static void
9135 do_iwmmxt_tinsr (void)
9136 {
9137   inst.instruction |= inst.operands[0].reg << 16;
9138   inst.instruction |= inst.operands[1].reg << 12;
9139   inst.instruction |= inst.operands[2].imm;
9140 }
9141 
9142 static void
9143 do_iwmmxt_tmia (void)
9144 {
9145   inst.instruction |= inst.operands[0].reg << 5;
9146   inst.instruction |= inst.operands[1].reg;
9147   inst.instruction |= inst.operands[2].reg << 12;
9148 }
9149 
9150 static void
9151 do_iwmmxt_waligni (void)
9152 {
9153   inst.instruction |= inst.operands[0].reg << 12;
9154   inst.instruction |= inst.operands[1].reg << 16;
9155   inst.instruction |= inst.operands[2].reg;
9156   inst.instruction |= inst.operands[3].imm << 20;
9157 }
9158 
9159 static void
9160 do_iwmmxt_wmerge (void)
9161 {
9162   inst.instruction |= inst.operands[0].reg << 12;
9163   inst.instruction |= inst.operands[1].reg << 16;
9164   inst.instruction |= inst.operands[2].reg;
9165   inst.instruction |= inst.operands[3].imm << 21;
9166 }
9167 
9168 static void
9169 do_iwmmxt_wmov (void)
9170 {
9171   /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
9172   inst.instruction |= inst.operands[0].reg << 12;
9173   inst.instruction |= inst.operands[1].reg << 16;
9174   inst.instruction |= inst.operands[1].reg;
9175 }
9176 
9177 static void
9178 do_iwmmxt_wldstbh (void)
9179 {
9180   int reloc;
9181   inst.instruction |= inst.operands[0].reg << 12;
9182   if (thumb_mode)
9183     reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9184   else
9185     reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9186   encode_arm_cp_address (1, TRUE, FALSE, reloc);
9187 }
9188 
9189 static void
9190 do_iwmmxt_wldstw (void)
9191 {
9192   /* RIWR_RIWC clears .isreg for a control register.  */
9193   if (!inst.operands[0].isreg)
9194     {
9195       constraint (inst.cond != COND_ALWAYS, BAD_COND);
9196       inst.instruction |= 0xf0000000;
9197     }
9198 
9199   inst.instruction |= inst.operands[0].reg << 12;
9200   encode_arm_cp_address (1, TRUE, TRUE, 0);
9201 }
9202 
9203 static void
9204 do_iwmmxt_wldstd (void)
9205 {
9206   inst.instruction |= inst.operands[0].reg << 12;
9207   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9208       && inst.operands[1].immisreg)
9209     {
9210       inst.instruction &= ~0x1a000ff;
9211       inst.instruction |= (0xf << 28);
9212       if (inst.operands[1].preind)
9213 	inst.instruction |= PRE_INDEX;
9214       if (!inst.operands[1].negative)
9215 	inst.instruction |= INDEX_UP;
9216       if (inst.operands[1].writeback)
9217 	inst.instruction |= WRITE_BACK;
9218       inst.instruction |= inst.operands[1].reg << 16;
9219       inst.instruction |= inst.reloc.exp.X_add_number << 4;
9220       inst.instruction |= inst.operands[1].imm;
9221     }
9222   else
9223     encode_arm_cp_address (1, TRUE, FALSE, 0);
9224 }
9225 
9226 static void
9227 do_iwmmxt_wshufh (void)
9228 {
9229   inst.instruction |= inst.operands[0].reg << 12;
9230   inst.instruction |= inst.operands[1].reg << 16;
9231   inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9232   inst.instruction |= (inst.operands[2].imm & 0x0f);
9233 }
9234 
9235 static void
9236 do_iwmmxt_wzero (void)
9237 {
9238   /* WZERO reg is an alias for WANDN reg, reg, reg.  */
9239   inst.instruction |= inst.operands[0].reg;
9240   inst.instruction |= inst.operands[0].reg << 12;
9241   inst.instruction |= inst.operands[0].reg << 16;
9242 }
9243 
9244 static void
9245 do_iwmmxt_wrwrwr_or_imm5 (void)
9246 {
9247   if (inst.operands[2].isreg)
9248     do_rd_rn_rm ();
9249   else {
9250     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9251 		_("immediate operand requires iWMMXt2"));
9252     do_rd_rn ();
9253     if (inst.operands[2].imm == 0)
9254       {
9255 	switch ((inst.instruction >> 20) & 0xf)
9256 	  {
9257 	  case 4:
9258 	  case 5:
9259 	  case 6:
9260 	  case 7:
9261 	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
9262 	    inst.operands[2].imm = 16;
9263 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9264 	    break;
9265 	  case 8:
9266 	  case 9:
9267 	  case 10:
9268 	  case 11:
9269 	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
9270 	    inst.operands[2].imm = 32;
9271 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9272 	    break;
9273 	  case 12:
9274 	  case 13:
9275 	  case 14:
9276 	  case 15:
9277 	    {
9278 	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
9279 	      unsigned long wrn;
9280 	      wrn = (inst.instruction >> 16) & 0xf;
9281 	      inst.instruction &= 0xff0fff0f;
9282 	      inst.instruction |= wrn;
9283 	      /* Bail out here; the instruction is now assembled.  */
9284 	      return;
9285 	    }
9286 	  }
9287       }
9288     /* Map 32 -> 0, etc.  */
9289     inst.operands[2].imm &= 0x1f;
9290     inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9291   }
9292 }
9293 
9294 /* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
9295    operations first, then control, shift, and load/store.  */
9296 
9297 /* Insns like "foo X,Y,Z".  */
9298 
9299 static void
9300 do_mav_triple (void)
9301 {
9302   inst.instruction |= inst.operands[0].reg << 16;
9303   inst.instruction |= inst.operands[1].reg;
9304   inst.instruction |= inst.operands[2].reg << 12;
9305 }
9306 
9307 /* Insns like "foo W,X,Y,Z",
9308    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
9309 
9310 static void
9311 do_mav_quad (void)
9312 {
9313   inst.instruction |= inst.operands[0].reg << 5;
9314   inst.instruction |= inst.operands[1].reg << 12;
9315   inst.instruction |= inst.operands[2].reg << 16;
9316   inst.instruction |= inst.operands[3].reg;
9317 }
9318 
9319 /* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
9320 static void
9321 do_mav_dspsc (void)
9322 {
9323   inst.instruction |= inst.operands[1].reg << 12;
9324 }
9325 
9326 /* Maverick shift immediate instructions.
9327    cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9328    cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
9329 
9330 static void
9331 do_mav_shift (void)
9332 {
9333   int imm = inst.operands[2].imm;
9334 
9335   inst.instruction |= inst.operands[0].reg << 12;
9336   inst.instruction |= inst.operands[1].reg << 16;
9337 
9338   /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9339      Bits 5-7 of the insn should have bits 4-6 of the immediate.
9340      Bit 4 should be 0.	 */
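  /* Illustration: imm 0x25 (binary 010 0101) becomes
     (0x25 & 0xf) | ((0x25 & 0x70) << 1) = 0x45, i.e. bits 0-3 = 0101,
     bit 4 = 0, bits 5-7 = 010.  */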
9341   imm = (imm & 0xf) | ((imm & 0x70) << 1);
9342 
9343   inst.instruction |= imm;
9344 }
9345 
9346 /* XScale instructions.	 Also sorted with arithmetic before move.  */
9347 
9348 /* Xscale multiply-accumulate (argument parse)
9349      MIAcc   acc0,Rm,Rs
9350      MIAPHcc acc0,Rm,Rs
9351      MIAxycc acc0,Rm,Rs.  */
9352 
9353 static void
9354 do_xsc_mia (void)
9355 {
9356   inst.instruction |= inst.operands[1].reg;
9357   inst.instruction |= inst.operands[2].reg << 12;
9358 }
9359 
9360 /* Xscale move-accumulator-register (argument parse)
9361 
9362      MARcc   acc0,RdLo,RdHi.  */
9363 
9364 static void
9365 do_xsc_mar (void)
9366 {
9367   inst.instruction |= inst.operands[1].reg << 12;
9368   inst.instruction |= inst.operands[2].reg << 16;
9369 }
9370 
9371 /* Xscale move-register-accumulator (argument parse)
9372 
9373      MRAcc   RdLo,RdHi,acc0.  */
9374 
9375 static void
9376 do_xsc_mra (void)
9377 {
9378   constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9379   inst.instruction |= inst.operands[0].reg << 12;
9380   inst.instruction |= inst.operands[1].reg << 16;
9381 }
9382 
9383 /* Encoding functions relevant only to Thumb.  */
9384 
9385 /* inst.operands[i] is a shifted-register operand; encode
9386    it into inst.instruction in the format used by Thumb32.  */
9387 
9388 static void
9389 encode_thumb32_shifted_operand (int i)
9390 {
9391   unsigned int value = inst.reloc.exp.X_add_number;
9392   unsigned int shift = inst.operands[i].shift_kind;
9393 
9394   constraint (inst.operands[i].immisreg,
9395 	      _("shift by register not allowed in thumb mode"));
9396   inst.instruction |= inst.operands[i].reg;
9397   if (shift == SHIFT_RRX)
9398     inst.instruction |= SHIFT_ROR << 4;
9399   else
9400     {
9401       constraint (inst.reloc.exp.X_op != O_constant,
9402 		  _("expression too complex"));
9403 
9404       constraint (value > 32
9405 		  || (value == 32 && (shift == SHIFT_LSL
9406 				      || shift == SHIFT_ROR)),
9407 		  _("shift expression is too large"));
9408 
9409       if (value == 0)
9410 	shift = SHIFT_LSL;
9411       else if (value == 32)
9412 	value = 0;
9413 
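      /* The shift amount is split across the insn: bits 4:2 go to the
	 imm3 field (insn bits 14:12) and bits 1:0 to imm2 (bits 7:6).  */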
9414       inst.instruction |= shift << 4;
9415       inst.instruction |= (value & 0x1c) << 10;
9416       inst.instruction |= (value & 0x03) << 6;
9417     }
9418 }
9419 
9420 
9421 /* inst.operands[i] was set up by parse_address.  Encode it into a
9422    Thumb32 format load or store instruction.  Reject forms that cannot
9423    be used with such instructions.  If is_t is true, reject forms that
9424    cannot be used with a T instruction; if is_d is true, reject forms
9425    that cannot be used with a D instruction.  If it is a store insn,
9426    reject PC in Rn.  */
9427 
9428 static void
9429 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9430 {
9431   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9432 
9433   constraint (!inst.operands[i].isreg,
9434 	      _("Instruction does not support =N addresses"));
9435 
9436   inst.instruction |= inst.operands[i].reg << 16;
9437   if (inst.operands[i].immisreg)
9438     {
9439       constraint (is_pc, BAD_PC_ADDRESSING);
9440       constraint (is_t || is_d, _("cannot use register index with this instruction"));
9441       constraint (inst.operands[i].negative,
9442 		  _("Thumb does not support negative register indexing"));
9443       constraint (inst.operands[i].postind,
9444 		  _("Thumb does not support register post-indexing"));
9445       constraint (inst.operands[i].writeback,
9446 		  _("Thumb does not support register indexing with writeback"));
9447       constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9448 		  _("Thumb supports only LSL in shifted register indexing"));
9449 
9450       inst.instruction |= inst.operands[i].imm;
9451       if (inst.operands[i].shifted)
9452 	{
9453 	  constraint (inst.reloc.exp.X_op != O_constant,
9454 		      _("expression too complex"));
9455 	  constraint (inst.reloc.exp.X_add_number < 0
9456 		      || inst.reloc.exp.X_add_number > 3,
9457 		      _("shift out of range"));
9458 	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
9459 	}
9460       inst.reloc.type = BFD_RELOC_UNUSED;
9461     }
9462   else if (inst.operands[i].preind)
9463     {
9464       constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9465       constraint (is_t && inst.operands[i].writeback,
9466 		  _("cannot use writeback with this instruction"));
9467       constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9468 		  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9469 
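      /* Pre-indexed: for LDRD/STRD set the P bit (and W for writeback);
	 otherwise select the immediate-offset form.  The offset itself is
	 filled in later by the BFD_RELOC_ARM_T32_OFFSET_IMM fixup.  */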
9470       if (is_d)
9471 	{
9472 	  inst.instruction |= 0x01000000;
9473 	  if (inst.operands[i].writeback)
9474 	    inst.instruction |= 0x00200000;
9475 	}
9476       else
9477 	{
9478 	  inst.instruction |= 0x00000c00;
9479 	  if (inst.operands[i].writeback)
9480 	    inst.instruction |= 0x00000100;
9481 	}
9482       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9483     }
9484   else if (inst.operands[i].postind)
9485     {
9486       gas_assert (inst.operands[i].writeback);
9487       constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9488       constraint (is_t, _("cannot use post-indexing with this instruction"));
9489 
9490       if (is_d)
9491 	inst.instruction |= 0x00200000;
9492       else
9493 	inst.instruction |= 0x00000900;
9494       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9495     }
9496   else /* unindexed - only for coprocessor */
9497     inst.error = _("instruction does not accept unindexed addressing");
9498 }
9499 
9500 /* Table of Thumb instructions which exist in both 16- and 32-bit
9501    encodings (the latter only in post-V6T2 cores).  The index is the
9502    value used in the insns table below.  When there is more than one
9503    possible 16-bit encoding for the instruction, this table always
9504    holds variant (1).
9505    Also contains several pseudo-instructions used during relaxation.  */
9506 #define T16_32_TAB				\
9507   X(_adc,   4140, eb400000),			\
9508   X(_adcs,  4140, eb500000),			\
9509   X(_add,   1c00, eb000000),			\
9510   X(_adds,  1c00, eb100000),			\
9511   X(_addi,  0000, f1000000),			\
9512   X(_addis, 0000, f1100000),			\
9513   X(_add_pc,000f, f20f0000),			\
9514   X(_add_sp,000d, f10d0000),			\
9515   X(_adr,   000f, f20f0000),			\
9516   X(_and,   4000, ea000000),			\
9517   X(_ands,  4000, ea100000),			\
9518   X(_asr,   1000, fa40f000),			\
9519   X(_asrs,  1000, fa50f000),			\
9520   X(_b,     e000, f000b000),			\
9521   X(_bcond, d000, f0008000),			\
9522   X(_bic,   4380, ea200000),			\
9523   X(_bics,  4380, ea300000),			\
9524   X(_cmn,   42c0, eb100f00),			\
9525   X(_cmp,   2800, ebb00f00),			\
9526   X(_cpsie, b660, f3af8400),			\
9527   X(_cpsid, b670, f3af8600),			\
9528   X(_cpy,   4600, ea4f0000),			\
9529   X(_dec_sp,80dd, f1ad0d00),			\
9530   X(_eor,   4040, ea800000),			\
9531   X(_eors,  4040, ea900000),			\
9532   X(_inc_sp,00dd, f10d0d00),			\
9533   X(_ldmia, c800, e8900000),			\
9534   X(_ldr,   6800, f8500000),			\
9535   X(_ldrb,  7800, f8100000),			\
9536   X(_ldrh,  8800, f8300000),			\
9537   X(_ldrsb, 5600, f9100000),			\
9538   X(_ldrsh, 5e00, f9300000),			\
9539   X(_ldr_pc,4800, f85f0000),			\
9540   X(_ldr_pc2,4800, f85f0000),			\
9541   X(_ldr_sp,9800, f85d0000),			\
9542   X(_lsl,   0000, fa00f000),			\
9543   X(_lsls,  0000, fa10f000),			\
9544   X(_lsr,   0800, fa20f000),			\
9545   X(_lsrs,  0800, fa30f000),			\
9546   X(_mov,   2000, ea4f0000),			\
9547   X(_movs,  2000, ea5f0000),			\
9548   X(_mul,   4340, fb00f000),                     \
9549   X(_muls,  4340, ffffffff), /* no 32b muls */	\
9550   X(_mvn,   43c0, ea6f0000),			\
9551   X(_mvns,  43c0, ea7f0000),			\
9552   X(_neg,   4240, f1c00000), /* rsb #0 */	\
9553   X(_negs,  4240, f1d00000), /* rsbs #0 */	\
9554   X(_orr,   4300, ea400000),			\
9555   X(_orrs,  4300, ea500000),			\
9556   X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
9557   X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
9558   X(_rev,   ba00, fa90f080),			\
9559   X(_rev16, ba40, fa90f090),			\
9560   X(_revsh, bac0, fa90f0b0),			\
9561   X(_ror,   41c0, fa60f000),			\
9562   X(_rors,  41c0, fa70f000),			\
9563   X(_sbc,   4180, eb600000),			\
9564   X(_sbcs,  4180, eb700000),			\
9565   X(_stmia, c000, e8800000),			\
9566   X(_str,   6000, f8400000),			\
9567   X(_strb,  7000, f8000000),			\
9568   X(_strh,  8000, f8200000),			\
9569   X(_str_sp,9000, f84d0000),			\
9570   X(_sub,   1e00, eba00000),			\
9571   X(_subs,  1e00, ebb00000),			\
9572   X(_subi,  8000, f1a00000),			\
9573   X(_subis, 8000, f1b00000),			\
9574   X(_sxtb,  b240, fa4ff080),			\
9575   X(_sxth,  b200, fa0ff080),			\
9576   X(_tst,   4200, ea100f00),			\
9577   X(_uxtb,  b2c0, fa5ff080),			\
9578   X(_uxth,  b280, fa1ff080),			\
9579   X(_nop,   bf00, f3af8000),			\
9580   X(_yield, bf10, f3af8001),			\
9581   X(_wfe,   bf20, f3af8002),			\
9582   X(_wfi,   bf30, f3af8003),			\
9583   X(_sev,   bf40, f3af8004),                    \
9584   X(_sevl,  bf50, f3af8005)
9585 
9586 /* To catch errors in encoding functions, the codes are all offset by
9587    0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9588    as 16-bit instructions.  */
9589 #define X(a,b,c) T_MNEM##a
9590 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9591 #undef X
9592 
9593 #define X(a,b,c) 0x##b
9594 static const unsigned short thumb_op16[] = { T16_32_TAB };
9595 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9596 #undef X
9597 
9598 #define X(a,b,c) 0x##c
9599 static const unsigned int thumb_op32[] = { T16_32_TAB };
9600 #define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9601 #define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
9602 #undef X
9603 #undef T16_32_TAB
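
/* For example, T_MNEM_adc (the first table entry) is 0xF800, so
   THUMB_OP16 (T_MNEM_adc) yields 0x4140 and THUMB_OP32 (T_MNEM_adc)
   yields 0xeb400000, straight from the table above.  */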
9604 
9605 /* Thumb instruction encoders, in alphabetical order.  */
9606 
9607 /* ADDW or SUBW.  */
9608 
9609 static void
9610 do_t_add_sub_w (void)
9611 {
9612   int Rd, Rn;
9613 
9614   Rd = inst.operands[0].reg;
9615   Rn = inst.operands[1].reg;
9616 
9617   /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9618      is the SP-{plus,minus}-immediate form of the instruction.  */
9619   if (Rn == REG_SP)
9620     constraint (Rd == REG_PC, BAD_PC);
9621   else
9622     reject_bad_reg (Rd);
9623 
9624   inst.instruction |= (Rn << 16) | (Rd << 8);
9625   inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9626 }
9627 
9628 /* Parse an add or subtract instruction.  We get here with inst.instruction
9629    equalling any of T_MNEM_add, adds, sub, or subs.  */
9630 
9631 static void
9632 do_t_add_sub (void)
9633 {
9634   int Rd, Rs, Rn;
9635 
9636   Rd = inst.operands[0].reg;
9637   Rs = (inst.operands[1].present
9638 	? inst.operands[1].reg    /* Rd, Rs, foo */
9639 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9640 
9641   if (Rd == REG_PC)
9642     set_it_insn_type_last ();
9643 
9644   if (unified_syntax)
9645     {
9646       bfd_boolean flags;
9647       bfd_boolean narrow;
9648       int opcode;
9649 
9650       flags = (inst.instruction == T_MNEM_adds
9651 	       || inst.instruction == T_MNEM_subs);
9652       if (flags)
9653 	narrow = !in_it_block ();
9654       else
9655 	narrow = in_it_block ();
9656       if (!inst.operands[2].isreg)
9657 	{
9658 	  int add;
9659 
9660 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9661 
9662 	  add = (inst.instruction == T_MNEM_add
9663 		 || inst.instruction == T_MNEM_adds);
9664 	  opcode = 0;
9665 	  if (inst.size_req != 4)
9666 	    {
9667 	      /* Attempt to use a narrow opcode, with relaxation if
9668 	         appropriate.  */
9669 	      if (Rd == REG_SP && Rs == REG_SP && !flags)
9670 		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9671 	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9672 		opcode = T_MNEM_add_sp;
9673 	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9674 		opcode = T_MNEM_add_pc;
9675 	      else if (Rd <= 7 && Rs <= 7 && narrow)
9676 		{
9677 		  if (flags)
9678 		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
9679 		  else
9680 		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
9681 		}
9682 	      if (opcode)
9683 		{
9684 		  inst.instruction = THUMB_OP16(opcode);
9685 		  inst.instruction |= (Rd << 4) | Rs;
9686 		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9687 		  if (inst.size_req != 2)
9688 		    inst.relax = opcode;
9689 		}
9690 	      else
9691 		constraint (inst.size_req == 2, BAD_HIREG);
9692 	    }
9693 	  if (inst.size_req == 4
9694 	      || (inst.size_req != 2 && !opcode))
9695 	    {
9696 	      if (Rd == REG_PC)
9697 		{
9698 		  constraint (add, BAD_PC);
9699 		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9700 			     _("only SUBS PC, LR, #const allowed"));
9701 		  constraint (inst.reloc.exp.X_op != O_constant,
9702 			      _("expression too complex"));
9703 		  constraint (inst.reloc.exp.X_add_number < 0
9704 			      || inst.reloc.exp.X_add_number > 0xff,
9705 			     _("immediate value out of range"));
9706 		  inst.instruction = T2_SUBS_PC_LR
9707 				     | inst.reloc.exp.X_add_number;
9708 		  inst.reloc.type = BFD_RELOC_UNUSED;
9709 		  return;
9710 		}
9711 	      else if (Rs == REG_PC)
9712 		{
9713 		  /* Always use addw/subw.  */
9714 		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9715 		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9716 		}
9717 	      else
9718 		{
9719 		  inst.instruction = THUMB_OP32 (inst.instruction);
9720 		  inst.instruction = (inst.instruction & 0xe1ffffff)
9721 				     | 0x10000000;
9722 		  if (flags)
9723 		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9724 		  else
9725 		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9726 		}
9727 	      inst.instruction |= Rd << 8;
9728 	      inst.instruction |= Rs << 16;
9729 	    }
9730 	}
9731       else
9732 	{
9733 	  unsigned int value = inst.reloc.exp.X_add_number;
9734 	  unsigned int shift = inst.operands[2].shift_kind;
9735 
9736 	  Rn = inst.operands[2].reg;
9737 	  /* See if we can do this with a 16-bit instruction.  */
9738 	  if (!inst.operands[2].shifted && inst.size_req != 4)
9739 	    {
9740 	      if (Rd > 7 || Rs > 7 || Rn > 7)
9741 		narrow = FALSE;
9742 
9743 	      if (narrow)
9744 		{
9745 		  inst.instruction = ((inst.instruction == T_MNEM_adds
9746 				       || inst.instruction == T_MNEM_add)
9747 				      ? T_OPCODE_ADD_R3
9748 				      : T_OPCODE_SUB_R3);
9749 		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9750 		  return;
9751 		}
9752 
9753 	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9754 		{
9755 		  /* Thumb-1 cores (except v6-M) require at least one high
9756 		     register in a narrow non-flag-setting add.  */
9757 		  if (Rd > 7 || Rn > 7
9758 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9759 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9760 		    {
9761 		      if (Rd == Rn)
9762 			{
9763 			  Rn = Rs;
9764 			  Rs = Rd;
9765 			}
9766 		      inst.instruction = T_OPCODE_ADD_HI;
9767 		      inst.instruction |= (Rd & 8) << 4;
9768 		      inst.instruction |= (Rd & 7);
9769 		      inst.instruction |= Rn << 3;
9770 		      return;
9771 		    }
9772 		}
9773 	    }
9774 
9775 	  constraint (Rd == REG_PC, BAD_PC);
9776 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9777 	  constraint (Rs == REG_PC, BAD_PC);
9778 	  reject_bad_reg (Rn);
9779 
9780 	  /* If we get here, it can't be done in 16 bits.  */
9781 	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9782 		      _("shift must be constant"));
9783 	  inst.instruction = THUMB_OP32 (inst.instruction);
9784 	  inst.instruction |= Rd << 8;
9785 	  inst.instruction |= Rs << 16;
9786 	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9787 		      _("shift value over 3 not allowed in thumb mode"));
9788 	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9789 		      _("only LSL shift allowed in thumb mode"));
9790 	  encode_thumb32_shifted_operand (2);
9791 	}
9792     }
9793   else
9794     {
9795       constraint (inst.instruction == T_MNEM_adds
9796 		  || inst.instruction == T_MNEM_subs,
9797 		  BAD_THUMB32);
9798 
9799       if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9800 	{
9801 	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9802 		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9803 		      BAD_HIREG);
9804 
9805 	  inst.instruction = (inst.instruction == T_MNEM_add
9806 			      ? 0x0000 : 0x8000);
9807 	  inst.instruction |= (Rd << 4) | Rs;
9808 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9809 	  return;
9810 	}
9811 
9812       Rn = inst.operands[2].reg;
9813       constraint (inst.operands[2].shifted, _("unshifted register required"));
9814 
9815       /* We now have Rd, Rs, and Rn set to registers.  */
9816       if (Rd > 7 || Rs > 7 || Rn > 7)
9817 	{
9818 	  /* Can't do this for SUB.	 */
9819 	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9820 	  inst.instruction = T_OPCODE_ADD_HI;
9821 	  inst.instruction |= (Rd & 8) << 4;
9822 	  inst.instruction |= (Rd & 7);
9823 	  if (Rs == Rd)
9824 	    inst.instruction |= Rn << 3;
9825 	  else if (Rn == Rd)
9826 	    inst.instruction |= Rs << 3;
9827 	  else
9828 	    constraint (1, _("dest must overlap one source register"));
9829 	}
9830       else
9831 	{
9832 	  inst.instruction = (inst.instruction == T_MNEM_add
9833 			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9834 	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9835 	}
9836     }
9837 }
9838 
9839 static void
9840 do_t_adr (void)
9841 {
9842   unsigned Rd;
9843 
9844   Rd = inst.operands[0].reg;
9845   reject_bad_reg (Rd);
9846 
9847   if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9848     {
9849       /* Defer to section relaxation.  */
9850       inst.relax = inst.instruction;
9851       inst.instruction = THUMB_OP16 (inst.instruction);
9852       inst.instruction |= Rd << 4;
9853     }
9854   else if (unified_syntax && inst.size_req != 2)
9855     {
9856       /* Generate a 32-bit opcode.  */
9857       inst.instruction = THUMB_OP32 (inst.instruction);
9858       inst.instruction |= Rd << 8;
9859       inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9860       inst.reloc.pc_rel = 1;
9861     }
9862   else
9863     {
9864       /* Generate a 16-bit opcode.  */
9865       inst.instruction = THUMB_OP16 (inst.instruction);
9866       inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9867       inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
9868       inst.reloc.pc_rel = 1;
9869 
9870       inst.instruction |= Rd << 4;
9871     }
9872 }
9873 
9874 /* Arithmetic instructions for which there is just one 16-bit
9875    instruction encoding, and it allows only two low registers.
9876    For maximal compatibility with ARM syntax, we allow three register
9877    operands even when Thumb-32 instructions are not available, as long
9878    as the first two are identical.  For instance, both "sbc r0,r1" and
9879    "sbc r0,r0,r1" are allowed.  */
9880 static void
9881 do_t_arit3 (void)
9882 {
9883   int Rd, Rs, Rn;
9884 
9885   Rd = inst.operands[0].reg;
9886   Rs = (inst.operands[1].present
9887 	? inst.operands[1].reg    /* Rd, Rs, foo */
9888 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9889   Rn = inst.operands[2].reg;
9890 
9891   reject_bad_reg (Rd);
9892   reject_bad_reg (Rs);
9893   if (inst.operands[2].isreg)
9894     reject_bad_reg (Rn);
9895 
9896   if (unified_syntax)
9897     {
9898       if (!inst.operands[2].isreg)
9899 	{
9900 	  /* For an immediate, we always generate a 32-bit opcode;
9901 	     section relaxation will shrink it later if possible.  */
9902 	  inst.instruction = THUMB_OP32 (inst.instruction);
9903 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9904 	  inst.instruction |= Rd << 8;
9905 	  inst.instruction |= Rs << 16;
9906 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9907 	}
9908       else
9909 	{
9910 	  bfd_boolean narrow;
9911 
9912 	  /* See if we can do this with a 16-bit instruction.  */
9913 	  if (THUMB_SETS_FLAGS (inst.instruction))
9914 	    narrow = !in_it_block ();
9915 	  else
9916 	    narrow = in_it_block ();
9917 
9918 	  if (Rd > 7 || Rn > 7 || Rs > 7)
9919 	    narrow = FALSE;
9920 	  if (inst.operands[2].shifted)
9921 	    narrow = FALSE;
9922 	  if (inst.size_req == 4)
9923 	    narrow = FALSE;
9924 
9925 	  if (narrow
9926 	      && Rd == Rs)
9927 	    {
9928 	      inst.instruction = THUMB_OP16 (inst.instruction);
9929 	      inst.instruction |= Rd;
9930 	      inst.instruction |= Rn << 3;
9931 	      return;
9932 	    }
9933 
9934 	  /* If we get here, it can't be done in 16 bits.  */
9935 	  constraint (inst.operands[2].shifted
9936 		      && inst.operands[2].immisreg,
9937 		      _("shift must be constant"));
9938 	  inst.instruction = THUMB_OP32 (inst.instruction);
9939 	  inst.instruction |= Rd << 8;
9940 	  inst.instruction |= Rs << 16;
9941 	  encode_thumb32_shifted_operand (2);
9942 	}
9943     }
9944   else
9945     {
9946       /* On its face this is a lie - the instruction does set the
9947 	 flags.  However, the only supported mnemonic in this mode
9948 	 says it doesn't.  */
9949       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9950 
9951       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9952 		  _("unshifted register required"));
9953       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9954       constraint (Rd != Rs,
9955 		  _("dest and source1 must be the same register"));
9956 
9957       inst.instruction = THUMB_OP16 (inst.instruction);
9958       inst.instruction |= Rd;
9959       inst.instruction |= Rn << 3;
9960     }
9961 }
9962 
9963 /* Similarly, but for instructions where the arithmetic operation is
9964    commutative, so we can allow either of them to be different from
9965    the destination operand in a 16-bit instruction.  For instance, all
9966    three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9967    accepted.  */
9968 static void
9969 do_t_arit3c (void)
9970 {
9971   int Rd, Rs, Rn;
9972 
9973   Rd = inst.operands[0].reg;
9974   Rs = (inst.operands[1].present
9975 	? inst.operands[1].reg    /* Rd, Rs, foo */
9976 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9977   Rn = inst.operands[2].reg;
9978 
9979   reject_bad_reg (Rd);
9980   reject_bad_reg (Rs);
9981   if (inst.operands[2].isreg)
9982     reject_bad_reg (Rn);
9983 
9984   if (unified_syntax)
9985     {
9986       if (!inst.operands[2].isreg)
9987 	{
9988 	  /* For an immediate, we always generate a 32-bit opcode;
9989 	     section relaxation will shrink it later if possible.  */
9990 	  inst.instruction = THUMB_OP32 (inst.instruction);
9991 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9992 	  inst.instruction |= Rd << 8;
9993 	  inst.instruction |= Rs << 16;
9994 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9995 	}
9996       else
9997 	{
9998 	  bfd_boolean narrow;
9999 
10000 	  /* See if we can do this with a 16-bit instruction.  */
10001 	  if (THUMB_SETS_FLAGS (inst.instruction))
10002 	    narrow = !in_it_block ();
10003 	  else
10004 	    narrow = in_it_block ();
10005 
10006 	  if (Rd > 7 || Rn > 7 || Rs > 7)
10007 	    narrow = FALSE;
10008 	  if (inst.operands[2].shifted)
10009 	    narrow = FALSE;
10010 	  if (inst.size_req == 4)
10011 	    narrow = FALSE;
10012 
10013 	  if (narrow)
10014 	    {
10015 	      if (Rd == Rs)
10016 		{
10017 		  inst.instruction = THUMB_OP16 (inst.instruction);
10018 		  inst.instruction |= Rd;
10019 		  inst.instruction |= Rn << 3;
10020 		  return;
10021 		}
10022 	      if (Rd == Rn)
10023 		{
10024 		  inst.instruction = THUMB_OP16 (inst.instruction);
10025 		  inst.instruction |= Rd;
10026 		  inst.instruction |= Rs << 3;
10027 		  return;
10028 		}
10029 	    }
10030 
10031 	  /* If we get here, it can't be done in 16 bits.  */
10032 	  constraint (inst.operands[2].shifted
10033 		      && inst.operands[2].immisreg,
10034 		      _("shift must be constant"));
10035 	  inst.instruction = THUMB_OP32 (inst.instruction);
10036 	  inst.instruction |= Rd << 8;
10037 	  inst.instruction |= Rs << 16;
10038 	  encode_thumb32_shifted_operand (2);
10039 	}
10040     }
10041   else
10042     {
10043       /* On its face this is a lie - the instruction does set the
10044 	 flags.  However, the only supported mnemonic in this mode
10045 	 says it doesn't.  */
10046       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10047 
10048       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10049 		  _("unshifted register required"));
10050       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10051 
10052       inst.instruction = THUMB_OP16 (inst.instruction);
10053       inst.instruction |= Rd;
10054 
10055       if (Rd == Rs)
10056 	inst.instruction |= Rn << 3;
10057       else if (Rd == Rn)
10058 	inst.instruction |= Rs << 3;
10059       else
10060 	constraint (1, _("dest must overlap one source register"));
10061     }
10062 }
10063 
10064 static void
10065 do_t_barrier (void)
10066 {
10067   if (inst.operands[0].present)
10068     {
10069       constraint ((inst.instruction & 0xf0) != 0x40
10070 		  && (inst.operands[0].imm > 0xf
10071 		      || inst.operands[0].imm < 0x0),
10072 		  _("bad barrier type"));
10073       inst.instruction |= inst.operands[0].imm;
10074     }
10075   else
10076     inst.instruction |= 0xf;
10077 }
10078 
10079 static void
10080 do_t_bfc (void)
10081 {
10082   unsigned Rd;
10083   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10084   constraint (msb > 32, _("bit-field extends past end of register"));
10085   /* The instruction encoding stores the LSB and MSB,
10086      not the LSB and width.  */
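  /* For example, "bfc r0, #3, #5" is encoded with lsb = 3 and msb = 7.  */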
10087   Rd = inst.operands[0].reg;
10088   reject_bad_reg (Rd);
10089   inst.instruction |= Rd << 8;
10090   inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10091   inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10092   inst.instruction |= msb - 1;
10093 }
10094 
10095 static void
10096 do_t_bfi (void)
10097 {
10098   int Rd, Rn;
10099   unsigned int msb;
10100 
10101   Rd = inst.operands[0].reg;
10102   reject_bad_reg (Rd);
10103 
10104   /* #0 in second position is alternative syntax for bfc, which is
10105      the same instruction but with REG_PC in the Rm field.  */
10106   if (!inst.operands[1].isreg)
10107     Rn = REG_PC;
10108   else
10109     {
10110       Rn = inst.operands[1].reg;
10111       reject_bad_reg (Rn);
10112     }
10113 
10114   msb = inst.operands[2].imm + inst.operands[3].imm;
10115   constraint (msb > 32, _("bit-field extends past end of register"));
10116   /* The instruction encoding stores the LSB and MSB,
10117      not the LSB and width.  */
10118   inst.instruction |= Rd << 8;
10119   inst.instruction |= Rn << 16;
10120   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10121   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10122   inst.instruction |= msb - 1;
10123 }
10124 
10125 static void
10126 do_t_bfx (void)
10127 {
10128   unsigned Rd, Rn;
10129 
10130   Rd = inst.operands[0].reg;
10131   Rn = inst.operands[1].reg;
10132 
10133   reject_bad_reg (Rd);
10134   reject_bad_reg (Rn);
10135 
10136   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10137 	      _("bit-field extends past end of register"));
10138   inst.instruction |= Rd << 8;
10139   inst.instruction |= Rn << 16;
10140   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10141   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10142   inst.instruction |= inst.operands[3].imm - 1;
10143 }
10144 
10145 /* ARM V5 Thumb BLX (argument parse)
10146 	BLX <target_addr>	which is BLX(1)
10147 	BLX <Rm>		which is BLX(2)
10148    Unfortunately, there are two different opcodes for this mnemonic.
10149    So, the insns[].value is not used, and the code here zaps values
10150 	into inst.instruction.
10151 
10152    ??? How to take advantage of the additional two bits of displacement
10153    available in Thumb32 mode?  Need new relocation?  */
10154 
10155 static void
10156 do_t_blx (void)
10157 {
10158   set_it_insn_type_last ();
10159 
10160   if (inst.operands[0].isreg)
10161     {
10162       constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10163       /* We have a register, so this is BLX(2).  */
10164       inst.instruction |= inst.operands[0].reg << 3;
10165     }
10166   else
10167     {
10168       /* No register.  This must be BLX(1).  */
10169       inst.instruction = 0xf000e800;
10170       encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10171     }
10172 }
10173 
10174 static void
10175 do_t_branch (void)
10176 {
10177   int opcode;
10178   int cond;
10179   int reloc;
10180 
10181   cond = inst.cond;
10182   set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10183 
10184   if (in_it_block ())
10185     {
10186       /* Conditional branches inside IT blocks are encoded as unconditional
10187          branches.  */
10188       cond = COND_ALWAYS;
10189     }
10190   else
10191     cond = inst.cond;
10192 
10193   if (cond != COND_ALWAYS)
10194     opcode = T_MNEM_bcond;
10195   else
10196     opcode = inst.instruction;
10197 
10198   if (unified_syntax
10199       && (inst.size_req == 4
10200 	  || (inst.size_req != 2
10201 	      && (inst.operands[0].hasreloc
10202 		  || inst.reloc.exp.X_op == O_constant))))
10203     {
10204       inst.instruction = THUMB_OP32(opcode);
10205       if (cond == COND_ALWAYS)
10206 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10207       else
10208 	{
10209 	  gas_assert (cond != 0xF);
10210 	  inst.instruction |= cond << 22;
10211 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10212 	}
10213     }
10214   else
10215     {
10216       inst.instruction = THUMB_OP16(opcode);
10217       if (cond == COND_ALWAYS)
10218 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10219       else
10220 	{
10221 	  inst.instruction |= cond << 8;
10222 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10223 	}
10224       /* Allow section relaxation.  */
10225       if (unified_syntax && inst.size_req != 2)
10226 	inst.relax = opcode;
10227     }
10228   inst.reloc.type = reloc;
10229   inst.reloc.pc_rel = 1;
10230 }
10231 
10232 /* Actually do the work for Thumb state bkpt and hlt.  The only difference
10233    between the two is the maximum immediate allowed - which is passed in
10234    RANGE.  */
10235 static void
10236 do_t_bkpt_hlt1 (int range)
10237 {
10238   constraint (inst.cond != COND_ALWAYS,
10239 	      _("instruction is always unconditional"));
10240   if (inst.operands[0].present)
10241     {
10242       constraint (inst.operands[0].imm > range,
10243 		  _("immediate value out of range"));
10244       inst.instruction |= inst.operands[0].imm;
10245     }
10246 
10247   set_it_insn_type (NEUTRAL_IT_INSN);
10248 }
10249 
10250 static void
10251 do_t_hlt (void)
10252 {
10253   do_t_bkpt_hlt1 (63);
10254 }
10255 
10256 static void
10257 do_t_bkpt (void)
10258 {
10259   do_t_bkpt_hlt1 (255);
10260 }
10261 
10262 static void
10263 do_t_branch23 (void)
10264 {
10265   set_it_insn_type_last ();
10266   encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10267 
10268   /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10269      this file.  We used to simply ignore the PLT reloc type here --
10270      the branch encoding is now needed to deal with TLSCALL relocs.
10271      So if we see a PLT reloc now, put it back to how it used to be to
10272      keep the preexisting behaviour.  */
10273   if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10274     inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10275 
10276 #if defined(OBJ_COFF)
10277   /* If the destination of the branch is a defined symbol which does not have
10278      the THUMB_FUNC attribute, then we must be calling a function which has
10279      the (interfacearm) attribute.  We look for the Thumb entry point to that
10280      function and change the branch to refer to that function instead.	*/
10281   if (	 inst.reloc.exp.X_op == O_symbol
10282       && inst.reloc.exp.X_add_symbol != NULL
10283       && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10284       && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10285     inst.reloc.exp.X_add_symbol =
10286       find_real_start (inst.reloc.exp.X_add_symbol);
10287 #endif
10288 }
10289 
10290 static void
10291 do_t_bx (void)
10292 {
10293   set_it_insn_type_last ();
10294   inst.instruction |= inst.operands[0].reg << 3;
10295   /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
10296      should cause the alignment to be checked once it is known.	 This is
10297      because BX PC only works if the instruction is word aligned.  */
10298 }
10299 
10300 static void
10301 do_t_bxj (void)
10302 {
10303   int Rm;
10304 
10305   set_it_insn_type_last ();
10306   Rm = inst.operands[0].reg;
10307   reject_bad_reg (Rm);
10308   inst.instruction |= Rm << 16;
10309 }
10310 
10311 static void
10312 do_t_clz (void)
10313 {
10314   unsigned Rd;
10315   unsigned Rm;
10316 
10317   Rd = inst.operands[0].reg;
10318   Rm = inst.operands[1].reg;
10319 
10320   reject_bad_reg (Rd);
10321   reject_bad_reg (Rm);
10322 
10323   inst.instruction |= Rd << 8;
10324   inst.instruction |= Rm << 16;
10325   inst.instruction |= Rm;
10326 }
10327 
10328 static void
10329 do_t_cps (void)
10330 {
10331   set_it_insn_type (OUTSIDE_IT_INSN);
10332   inst.instruction |= inst.operands[0].imm;
10333 }
10334 
10335 static void
10336 do_t_cpsi (void)
10337 {
10338   set_it_insn_type (OUTSIDE_IT_INSN);
10339   if (unified_syntax
10340       && (inst.operands[1].present || inst.size_req == 4)
10341       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10342     {
10343       unsigned int imod = (inst.instruction & 0x0030) >> 4;
10344       inst.instruction = 0xf3af8000;
10345       inst.instruction |= imod << 9;
10346       inst.instruction |= inst.operands[0].imm << 5;
10347       if (inst.operands[1].present)
10348 	inst.instruction |= 0x100 | inst.operands[1].imm;
10349     }
10350   else
10351     {
10352       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10353 		  && (inst.operands[0].imm & 4),
10354 		  _("selected processor does not support 'A' form "
10355 		    "of this instruction"));
10356       constraint (inst.operands[1].present || inst.size_req == 4,
10357 		  _("Thumb does not support the 2-argument "
10358 		    "form of this instruction"));
10359       inst.instruction |= inst.operands[0].imm;
10360     }
10361 }
10362 
10363 /* THUMB CPY instruction (argument parse).  */
10364 
10365 static void
10366 do_t_cpy (void)
10367 {
10368   if (inst.size_req == 4)
10369     {
10370       inst.instruction = THUMB_OP32 (T_MNEM_mov);
10371       inst.instruction |= inst.operands[0].reg << 8;
10372       inst.instruction |= inst.operands[1].reg;
10373     }
10374   else
10375     {
10376       inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10377       inst.instruction |= (inst.operands[0].reg & 0x7);
10378       inst.instruction |= inst.operands[1].reg << 3;
10379     }
10380 }
10381 
10382 static void
10383 do_t_cbz (void)
10384 {
10385   set_it_insn_type (OUTSIDE_IT_INSN);
10386   constraint (inst.operands[0].reg > 7, BAD_HIREG);
10387   inst.instruction |= inst.operands[0].reg;
10388   inst.reloc.pc_rel = 1;
10389   inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10390 }
10391 
10392 static void
10393 do_t_dbg (void)
10394 {
10395   inst.instruction |= inst.operands[0].imm;
10396 }
10397 
10398 static void
10399 do_t_div (void)
10400 {
10401   unsigned Rd, Rn, Rm;
10402 
10403   Rd = inst.operands[0].reg;
10404   Rn = (inst.operands[1].present
10405 	? inst.operands[1].reg : Rd);
10406   Rm = inst.operands[2].reg;
10407 
10408   reject_bad_reg (Rd);
10409   reject_bad_reg (Rn);
10410   reject_bad_reg (Rm);
10411 
10412   inst.instruction |= Rd << 8;
10413   inst.instruction |= Rn << 16;
10414   inst.instruction |= Rm;
10415 }
10416 
10417 static void
10418 do_t_hint (void)
10419 {
10420   if (unified_syntax && inst.size_req == 4)
10421     inst.instruction = THUMB_OP32 (inst.instruction);
10422   else
10423     inst.instruction = THUMB_OP16 (inst.instruction);
10424 }
10425 
10426 static void
10427 do_t_it (void)
10428 {
10429   unsigned int cond = inst.operands[0].imm;
10430 
10431   set_it_insn_type (IT_INSN);
10432   now_it.mask = (inst.instruction & 0xf) | 0x10;
10433   now_it.cc = cond;
10434   now_it.warn_deprecated = FALSE;
10435 
10436   /* If the condition is a negative condition, invert the mask.  */
10437   if ((cond & 0x1) == 0x0)
10438     {
10439       unsigned int mask = inst.instruction & 0x000f;
10440 
10441       if ((mask & 0x7) == 0)
10442 	{
10443 	  /* No conversion needed.  */
10444 	  now_it.block_length = 1;
10445 	}
10446       else if ((mask & 0x3) == 0)
10447 	{
10448 	  mask ^= 0x8;
10449 	  now_it.block_length = 2;
10450 	}
10451       else if ((mask & 0x1) == 0)
10452 	{
10453 	  mask ^= 0xC;
10454 	  now_it.block_length = 3;
10455 	}
10456       else
10457 	{
10458 	  mask ^= 0xE;
10459 	  now_it.block_length = 4;
10460 	}
10461 
10462       inst.instruction &= 0xfff0;
10463       inst.instruction |= mask;
10464     }
10465 
10466   inst.instruction |= cond << 4;
10467 }
10468 
10469 /* Helper function used for both push/pop and ldm/stm.  */
10470 static void
10471 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10472 {
10473   bfd_boolean load;
10474 
10475   load = (inst.instruction & (1 << 20)) != 0;
10476 
10477   if (mask & (1 << 13))
10478     inst.error =  _("SP not allowed in register list");
10479 
10480   if ((mask & (1 << base)) != 0
10481       && writeback)
10482     inst.error = _("having the base register in the register list when "
10483 		   "using write back is UNPREDICTABLE");
10484 
10485   if (load)
10486     {
10487       if (mask & (1 << 15))
10488         {
10489           if (mask & (1 << 14))
10490             inst.error = _("LR and PC should not both be in register list");
10491           else
10492             set_it_insn_type_last ();
10493         }
10494     }
10495   else
10496     {
10497       if (mask & (1 << 15))
10498 	inst.error = _("PC not allowed in register list");
10499     }
10500 
10501   if ((mask & (mask - 1)) == 0)
10502     {
10503       /* Single register transfers implemented as str/ldr.  */
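      /* For example, "ldmia r0!, {r1}" becomes "ldr r1, [r0], #4" and
	 "stmdb r0!, {r1}" becomes "str r1, [r0, #-4]!".  */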
10504       if (writeback)
10505 	{
10506 	  if (inst.instruction & (1 << 23))
10507 	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10508 	  else
10509 	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10510 	}
10511       else
10512 	{
10513 	  if (inst.instruction & (1 << 23))
10514 	    inst.instruction = 0x00800000; /* ia -> [base] */
10515 	  else
10516 	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10517 	}
10518 
10519       inst.instruction |= 0xf8400000;
10520       if (load)
10521 	inst.instruction |= 0x00100000;
10522 
10523       mask = ffs (mask) - 1;
10524       mask <<= 12;
10525     }
10526   else if (writeback)
10527     inst.instruction |= WRITE_BACK;
10528 
10529   inst.instruction |= mask;
10530   inst.instruction |= base << 16;
10531 }
10532 
10533 static void
10534 do_t_ldmstm (void)
10535 {
10536   /* This really doesn't seem worth it.  */
10537   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10538 	      _("expression too complex"));
10539   constraint (inst.operands[1].writeback,
10540 	      _("Thumb load/store multiple does not support {reglist}^"));
10541 
10542   if (unified_syntax)
10543     {
10544       bfd_boolean narrow;
10545       unsigned mask;
10546 
10547       narrow = FALSE;
10548       /* See if we can use a 16-bit instruction.  */
10549       if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10550 	  && inst.size_req != 4
10551 	  && !(inst.operands[1].imm & ~0xff))
10552 	{
10553 	  mask = 1 << inst.operands[0].reg;
10554 
10555 	  if (inst.operands[0].reg <= 7)
10556 	    {
10557 	      if (inst.instruction == T_MNEM_stmia
10558 		  ? inst.operands[0].writeback
10559 		  : (inst.operands[0].writeback
10560 		     == !(inst.operands[1].imm & mask)))
10561 	        {
10562 		  if (inst.instruction == T_MNEM_stmia
10563 		      && (inst.operands[1].imm & mask)
10564 		      && (inst.operands[1].imm & (mask - 1)))
10565 		    as_warn (_("value stored for r%d is UNKNOWN"),
10566 			     inst.operands[0].reg);
10567 
10568 		  inst.instruction = THUMB_OP16 (inst.instruction);
10569 		  inst.instruction |= inst.operands[0].reg << 8;
10570 		  inst.instruction |= inst.operands[1].imm;
10571 		  narrow = TRUE;
10572 		}
10573 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10574 		{
10575 		  /* This means 1 register in reg list, in one of 3 situations:
10576 		     1. Instruction is stmia, but without writeback.
10577 		     2. ldmia without writeback, but with Rn not in
10578 		        reglist.
10579 		     3. ldmia with writeback, but with Rn in reglist.
10580 		     Case 3 is UNPREDICTABLE behaviour, so we handle
10581 		     case 1 and 2 which can be converted into a 16-bit
10582 		     str or ldr. The SP cases are handled below.  */
10583 		  unsigned long opcode;
10584 		  /* First, record an error for Case 3.  */
10585 		  if (inst.operands[1].imm & mask
10586 		      && inst.operands[0].writeback)
10587 		    inst.error =
10588 			_("having the base register in the register list when "
10589 			  "using write back is UNPREDICTABLE");
10590 
10591 		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10592 							     : T_MNEM_ldr);
10593 		  inst.instruction = THUMB_OP16 (opcode);
10594 		  inst.instruction |= inst.operands[0].reg << 3;
10595 		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
10596 		  narrow = TRUE;
10597 		}
10598 	    }
10599 	  else if (inst.operands[0].reg == REG_SP)
10600 	    {
10601 	      if (inst.operands[0].writeback)
10602 		{
10603 		  inst.instruction =
10604 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
10605 			            ? T_MNEM_push : T_MNEM_pop);
10606 		  inst.instruction |= inst.operands[1].imm;
10607 	          narrow = TRUE;
10608 		}
10609 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10610 		{
10611 		  inst.instruction =
10612 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
10613 			            ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10614 		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10615 	          narrow = TRUE;
10616 		}
10617 	    }
10618 	}
10619 
10620       if (!narrow)
10621 	{
10622 	  if (inst.instruction < 0xffff)
10623 	    inst.instruction = THUMB_OP32 (inst.instruction);
10624 
10625 	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10626 				inst.operands[0].writeback);
10627 	}
10628     }
10629   else
10630     {
10631       constraint (inst.operands[0].reg > 7
10632 		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10633       constraint (inst.instruction != T_MNEM_ldmia
10634 		  && inst.instruction != T_MNEM_stmia,
10635 		  _("Thumb-2 instruction only valid in unified syntax"));
10636       if (inst.instruction == T_MNEM_stmia)
10637 	{
10638 	  if (!inst.operands[0].writeback)
10639 	    as_warn (_("this instruction will write back the base register"));
10640 	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10641 	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10642 	    as_warn (_("value stored for r%d is UNKNOWN"),
10643 		     inst.operands[0].reg);
10644 	}
10645       else
10646 	{
10647 	  if (!inst.operands[0].writeback
10648 	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10649 	    as_warn (_("this instruction will write back the base register"));
10650 	  else if (inst.operands[0].writeback
10651 		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10652 	    as_warn (_("this instruction will not write back the base register"));
10653 	}
10654 
10655       inst.instruction = THUMB_OP16 (inst.instruction);
10656       inst.instruction |= inst.operands[0].reg << 8;
10657       inst.instruction |= inst.operands[1].imm;
10658     }
10659 }
10660 
10661 static void
10662 do_t_ldrex (void)
10663 {
10664   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10665 	      || inst.operands[1].postind || inst.operands[1].writeback
10666 	      || inst.operands[1].immisreg || inst.operands[1].shifted
10667 	      || inst.operands[1].negative,
10668 	      BAD_ADDR_MODE);
10669 
10670   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10671 
10672   inst.instruction |= inst.operands[0].reg << 12;
10673   inst.instruction |= inst.operands[1].reg << 16;
10674   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10675 }
10676 
10677 static void
10678 do_t_ldrexd (void)
10679 {
10680   if (!inst.operands[1].present)
10681     {
10682       constraint (inst.operands[0].reg == REG_LR,
10683 		  _("r14 not allowed as first register "
10684 		    "when second register is omitted"));
10685       inst.operands[1].reg = inst.operands[0].reg + 1;
10686     }
10687   constraint (inst.operands[0].reg == inst.operands[1].reg,
10688 	      BAD_OVERLAP);
10689 
10690   inst.instruction |= inst.operands[0].reg << 12;
10691   inst.instruction |= inst.operands[1].reg << 8;
10692   inst.instruction |= inst.operands[2].reg << 16;
10693 }
10694 
10695 static void
10696 do_t_ldst (void)
10697 {
10698   unsigned long opcode;
10699   int Rn;
10700 
10701   if (inst.operands[0].isreg
10702       && !inst.operands[0].preind
10703       && inst.operands[0].reg == REG_PC)
10704     set_it_insn_type_last ();
10705 
10706   opcode = inst.instruction;
10707   if (unified_syntax)
10708     {
10709       if (!inst.operands[1].isreg)
10710 	{
10711 	  if (opcode <= 0xffff)
10712 	    inst.instruction = THUMB_OP32 (opcode);
10713 	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10714 	    return;
10715 	}
10716       if (inst.operands[1].isreg
10717 	  && !inst.operands[1].writeback
10718 	  && !inst.operands[1].shifted && !inst.operands[1].postind
10719 	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
10720 	  && opcode <= 0xffff
10721 	  && inst.size_req != 4)
10722 	{
10723 	  /* Insn may have a 16-bit form.  */
10724 	  Rn = inst.operands[1].reg;
10725 	  if (inst.operands[1].immisreg)
10726 	    {
10727 	      inst.instruction = THUMB_OP16 (opcode);
10728 	      /* [Rn, Rik] */
10729 	      if (Rn <= 7 && inst.operands[1].imm <= 7)
10730 		goto op16;
10731 	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10732 		reject_bad_reg (inst.operands[1].imm);
10733 	    }
10734 	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10735 		    && opcode != T_MNEM_ldrsb)
10736 		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10737 		   || (Rn == REG_SP && opcode == T_MNEM_str))
10738 	    {
10739 	      /* [Rn, #const] */
10740 	      if (Rn > 7)
10741 		{
10742 		  if (Rn == REG_PC)
10743 		    {
10744 		      if (inst.reloc.pc_rel)
10745 			opcode = T_MNEM_ldr_pc2;
10746 		      else
10747 			opcode = T_MNEM_ldr_pc;
10748 		    }
10749 		  else
10750 		    {
10751 		      if (opcode == T_MNEM_ldr)
10752 			opcode = T_MNEM_ldr_sp;
10753 		      else
10754 			opcode = T_MNEM_str_sp;
10755 		    }
10756 		  inst.instruction = inst.operands[0].reg << 8;
10757 		}
10758 	      else
10759 		{
10760 		  inst.instruction = inst.operands[0].reg;
10761 		  inst.instruction |= inst.operands[1].reg << 3;
10762 		}
10763 	      inst.instruction |= THUMB_OP16 (opcode);
10764 	      if (inst.size_req == 2)
10765 		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10766 	      else
10767 		inst.relax = opcode;
10768 	      return;
10769 	    }
10770 	}
10771       /* Definitely a 32-bit variant.  */
10772 
10773       /* Warning for Erratum 752419.  */
10774       if (opcode == T_MNEM_ldr
10775 	  && inst.operands[0].reg == REG_SP
10776 	  && inst.operands[1].writeback == 1
10777 	  && !inst.operands[1].immisreg)
10778 	{
10779 	  if (no_cpu_selected ()
10780 	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10781 	          && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10782 	          && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10783 	    as_warn (_("This instruction may be unpredictable "
10784 		       "if executed on M-profile cores "
10785 		       "with interrupts enabled."));
10786 	}
10787 
10788       /* Do some validations regarding addressing modes.  */
10789       if (inst.operands[1].immisreg)
10790 	reject_bad_reg (inst.operands[1].imm);
10791 
10792       constraint (inst.operands[1].writeback == 1
10793 		  && inst.operands[0].reg == inst.operands[1].reg,
10794 		  BAD_OVERLAP);
10795 
10796       inst.instruction = THUMB_OP32 (opcode);
10797       inst.instruction |= inst.operands[0].reg << 12;
10798       encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10799       check_ldr_r15_aligned ();
10800       return;
10801     }
10802 
10803   constraint (inst.operands[0].reg > 7, BAD_HIREG);
10804 
10805   if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10806     {
10807       /* Only [Rn,Rm] is acceptable.  */
10808       constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10809       constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10810 		  || inst.operands[1].postind || inst.operands[1].shifted
10811 		  || inst.operands[1].negative,
10812 		  _("Thumb does not support this addressing mode"));
10813       inst.instruction = THUMB_OP16 (inst.instruction);
10814       goto op16;
10815     }
10816 
10817   inst.instruction = THUMB_OP16 (inst.instruction);
10818   if (!inst.operands[1].isreg)
10819     if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10820       return;
10821 
10822   constraint (!inst.operands[1].preind
10823 	      || inst.operands[1].shifted
10824 	      || inst.operands[1].writeback,
10825 	      _("Thumb does not support this addressing mode"));
10826   if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10827     {
10828       constraint (inst.instruction & 0x0600,
10829 		  _("byte or halfword not valid for base register"));
10830       constraint (inst.operands[1].reg == REG_PC
10831 		  && !(inst.instruction & THUMB_LOAD_BIT),
10832 		  _("r15 based store not allowed"));
10833       constraint (inst.operands[1].immisreg,
10834 		  _("invalid base register for register offset"));
10835 
10836       if (inst.operands[1].reg == REG_PC)
10837 	inst.instruction = T_OPCODE_LDR_PC;
10838       else if (inst.instruction & THUMB_LOAD_BIT)
10839 	inst.instruction = T_OPCODE_LDR_SP;
10840       else
10841 	inst.instruction = T_OPCODE_STR_SP;
10842 
10843       inst.instruction |= inst.operands[0].reg << 8;
10844       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10845       return;
10846     }
10847 
10848   constraint (inst.operands[1].reg > 7, BAD_HIREG);
10849   if (!inst.operands[1].immisreg)
10850     {
10851       /* Immediate offset.  */
10852       inst.instruction |= inst.operands[0].reg;
10853       inst.instruction |= inst.operands[1].reg << 3;
10854       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10855       return;
10856     }
10857 
10858   /* Register offset.  */
10859   constraint (inst.operands[1].imm > 7, BAD_HIREG);
10860   constraint (inst.operands[1].negative,
10861 	      _("Thumb does not support this addressing mode"));
10862 
10863  op16:
10864   switch (inst.instruction)
10865     {
10866     case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10867     case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10868     case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10869     case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10870     case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10871     case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10872     case 0x5600 /* ldrsb */:
10873     case 0x5e00 /* ldrsh */: break;
10874     default: abort ();
10875     }
10876 
10877   inst.instruction |= inst.operands[0].reg;
10878   inst.instruction |= inst.operands[1].reg << 3;
10879   inst.instruction |= inst.operands[1].imm << 6;
10880 }
10881 
10882 static void
10883 do_t_ldstd (void)
10884 {
10885   if (!inst.operands[1].present)
10886     {
10887       inst.operands[1].reg = inst.operands[0].reg + 1;
10888       constraint (inst.operands[0].reg == REG_LR,
10889 		  _("r14 not allowed here"));
10890       constraint (inst.operands[0].reg == REG_R12,
10891                   _("r12 not allowed here"));
10892     }
10893 
10894   if (inst.operands[2].writeback
10895       && (inst.operands[0].reg == inst.operands[2].reg
10896       || inst.operands[1].reg == inst.operands[2].reg))
10897     as_warn (_("base register written back, and overlaps "
10898                "one of transfer registers"));
10899 
10900   inst.instruction |= inst.operands[0].reg << 12;
10901   inst.instruction |= inst.operands[1].reg << 8;
10902   encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10903 }
10904 
10905 static void
10906 do_t_ldstt (void)
10907 {
10908   inst.instruction |= inst.operands[0].reg << 12;
10909   encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10910 }
10911 
10912 static void
10913 do_t_mla (void)
10914 {
10915   unsigned Rd, Rn, Rm, Ra;
10916 
10917   Rd = inst.operands[0].reg;
10918   Rn = inst.operands[1].reg;
10919   Rm = inst.operands[2].reg;
10920   Ra = inst.operands[3].reg;
10921 
10922   reject_bad_reg (Rd);
10923   reject_bad_reg (Rn);
10924   reject_bad_reg (Rm);
10925   reject_bad_reg (Ra);
10926 
10927   inst.instruction |= Rd << 8;
10928   inst.instruction |= Rn << 16;
10929   inst.instruction |= Rm;
10930   inst.instruction |= Ra << 12;
10931 }
10932 
10933 static void
10934 do_t_mlal (void)
10935 {
10936   unsigned RdLo, RdHi, Rn, Rm;
10937 
10938   RdLo = inst.operands[0].reg;
10939   RdHi = inst.operands[1].reg;
10940   Rn = inst.operands[2].reg;
10941   Rm = inst.operands[3].reg;
10942 
10943   reject_bad_reg (RdLo);
10944   reject_bad_reg (RdHi);
10945   reject_bad_reg (Rn);
10946   reject_bad_reg (Rm);
10947 
10948   inst.instruction |= RdLo << 12;
10949   inst.instruction |= RdHi << 8;
10950   inst.instruction |= Rn << 16;
10951   inst.instruction |= Rm;
10952 }
10953 
10954 static void
10955 do_t_mov_cmp (void)
10956 {
10957   unsigned Rn, Rm;
10958 
10959   Rn = inst.operands[0].reg;
10960   Rm = inst.operands[1].reg;
10961 
10962   if (Rn == REG_PC)
10963     set_it_insn_type_last ();
10964 
10965   if (unified_syntax)
10966     {
10967       int r0off = (inst.instruction == T_MNEM_mov
10968 		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
10969       unsigned long opcode;
10970       bfd_boolean narrow;
10971       bfd_boolean low_regs;
10972 
10973       low_regs = (Rn <= 7 && Rm <= 7);
10974       opcode = inst.instruction;
10975       if (in_it_block ())
10976 	narrow = opcode != T_MNEM_movs;
10977       else
10978 	narrow = opcode != T_MNEM_movs || low_regs;
10979       if (inst.size_req == 4
10980 	  || inst.operands[1].shifted)
10981 	narrow = FALSE;
10982 
10983       /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
10984       if (opcode == T_MNEM_movs && inst.operands[1].isreg
10985 	  && !inst.operands[1].shifted
10986 	  && Rn == REG_PC
10987 	  && Rm == REG_LR)
10988 	{
10989 	  inst.instruction = T2_SUBS_PC_LR;
10990 	  return;
10991 	}
10992 
10993       if (opcode == T_MNEM_cmp)
10994 	{
10995 	  constraint (Rn == REG_PC, BAD_PC);
10996 	  if (narrow)
10997 	    {
10998 	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10999 		 but valid.  */
11000 	      warn_deprecated_sp (Rm);
11001 	      /* R15 was documented as a valid choice for Rm in ARMv6,
11002 		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
11003 		 tools reject R15, so we do too.  */
11004 	      constraint (Rm == REG_PC, BAD_PC);
11005 	    }
11006 	  else
11007 	    reject_bad_reg (Rm);
11008 	}
11009       else if (opcode == T_MNEM_mov
11010 	       || opcode == T_MNEM_movs)
11011 	{
11012 	  if (inst.operands[1].isreg)
11013 	    {
11014 	      if (opcode == T_MNEM_movs)
11015 		{
11016 		  reject_bad_reg (Rn);
11017 		  reject_bad_reg (Rm);
11018 		}
11019 	      else if (narrow)
11020 		{
11021 		  /* This is mov.n.  */
11022 		  if ((Rn == REG_SP || Rn == REG_PC)
11023 		      && (Rm == REG_SP || Rm == REG_PC))
11024 		    {
11025 		      as_warn (_("Use of r%u as a source register is "
11026 				 "deprecated when r%u is the destination "
11027 				 "register."), Rm, Rn);
11028 		    }
11029 		}
11030 	      else
11031 		{
11032 		  /* This is mov.w.  */
11033 		  constraint (Rn == REG_PC, BAD_PC);
11034 		  constraint (Rm == REG_PC, BAD_PC);
11035 		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11036 		}
11037 	    }
11038 	  else
11039 	    reject_bad_reg (Rn);
11040 	}
11041 
11042       if (!inst.operands[1].isreg)
11043 	{
11044 	  /* Immediate operand.  */
11045 	  if (!in_it_block () && opcode == T_MNEM_mov)
11046 	    narrow = 0;
11047 	  if (low_regs && narrow)
11048 	    {
11049 	      inst.instruction = THUMB_OP16 (opcode);
11050 	      inst.instruction |= Rn << 8;
11051 	      if (inst.size_req == 2)
11052 		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11053 	      else
11054 		inst.relax = opcode;
11055 	    }
11056 	  else
11057 	    {
11058 	      inst.instruction = THUMB_OP32 (inst.instruction);
11059 	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11060 	      inst.instruction |= Rn << r0off;
11061 	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11062 	    }
11063 	}
11064       else if (inst.operands[1].shifted && inst.operands[1].immisreg
11065 	       && (inst.instruction == T_MNEM_mov
11066 		   || inst.instruction == T_MNEM_movs))
11067 	{
11068 	  /* Register shifts are encoded as separate shift instructions.  */
11069 	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11070 
11071 	  if (in_it_block ())
11072 	    narrow = !flags;
11073 	  else
11074 	    narrow = flags;
11075 
11076 	  if (inst.size_req == 4)
11077 	    narrow = FALSE;
11078 
11079 	  if (!low_regs || inst.operands[1].imm > 7)
11080 	    narrow = FALSE;
11081 
11082 	  if (Rn != Rm)
11083 	    narrow = FALSE;
11084 
11085 	  switch (inst.operands[1].shift_kind)
11086 	    {
11087 	    case SHIFT_LSL:
11088 	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11089 	      break;
11090 	    case SHIFT_ASR:
11091 	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11092 	      break;
11093 	    case SHIFT_LSR:
11094 	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11095 	      break;
11096 	    case SHIFT_ROR:
11097 	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11098 	      break;
11099 	    default:
11100 	      abort ();
11101 	    }
11102 
11103 	  inst.instruction = opcode;
11104 	  if (narrow)
11105 	    {
11106 	      inst.instruction |= Rn;
11107 	      inst.instruction |= inst.operands[1].imm << 3;
11108 	    }
11109 	  else
11110 	    {
11111 	      if (flags)
11112 		inst.instruction |= CONDS_BIT;
11113 
11114 	      inst.instruction |= Rn << 8;
11115 	      inst.instruction |= Rm << 16;
11116 	      inst.instruction |= inst.operands[1].imm;
11117 	    }
11118 	}
11119       else if (!narrow)
11120 	{
11121 	  /* Some mov with immediate shift have narrow variants.
11122 	     Register shifts are handled above.  */
11123 	  if (low_regs && inst.operands[1].shifted
11124 	      && (inst.instruction == T_MNEM_mov
11125 		  || inst.instruction == T_MNEM_movs))
11126 	    {
11127 	      if (in_it_block ())
11128 		narrow = (inst.instruction == T_MNEM_mov);
11129 	      else
11130 		narrow = (inst.instruction == T_MNEM_movs);
11131 	    }
11132 
11133 	  if (narrow)
11134 	    {
11135 	      switch (inst.operands[1].shift_kind)
11136 		{
11137 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11138 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11139 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11140 		default: narrow = FALSE; break;
11141 		}
11142 	    }
11143 
11144 	  if (narrow)
11145 	    {
11146 	      inst.instruction |= Rn;
11147 	      inst.instruction |= Rm << 3;
11148 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11149 	    }
11150 	  else
11151 	    {
11152 	      inst.instruction = THUMB_OP32 (inst.instruction);
11153 	      inst.instruction |= Rn << r0off;
11154 	      encode_thumb32_shifted_operand (1);
11155 	    }
11156 	}
11157       else
11158 	switch (inst.instruction)
11159 	  {
11160 	  case T_MNEM_mov:
11161 	    /* In v4t or v5t a move of two lowregs produces unpredictable
11162 	       results. Don't allow this.  */
11163 	    if (low_regs)
11164 	      {
11165 		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11166 			    "MOV Rd, Rs with two low registers is not "
11167 			    "permitted on this architecture");
11168 		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11169 					arm_ext_v6);
11170 	      }
11171 
11172 	    inst.instruction = T_OPCODE_MOV_HR;
11173 	    inst.instruction |= (Rn & 0x8) << 4;
11174 	    inst.instruction |= (Rn & 0x7);
11175 	    inst.instruction |= Rm << 3;
11176 	    break;
11177 
11178 	  case T_MNEM_movs:
11179 	    /* We know we have low registers at this point.
11180 	       Generate LSLS Rd, Rs, #0.  */
11181 	    inst.instruction = T_OPCODE_LSL_I;
11182 	    inst.instruction |= Rn;
11183 	    inst.instruction |= Rm << 3;
11184 	    break;
11185 
11186 	  case T_MNEM_cmp:
11187 	    if (low_regs)
11188 	      {
11189 		inst.instruction = T_OPCODE_CMP_LR;
11190 		inst.instruction |= Rn;
11191 		inst.instruction |= Rm << 3;
11192 	      }
11193 	    else
11194 	      {
11195 		inst.instruction = T_OPCODE_CMP_HR;
11196 		inst.instruction |= (Rn & 0x8) << 4;
11197 		inst.instruction |= (Rn & 0x7);
11198 		inst.instruction |= Rm << 3;
11199 	      }
11200 	    break;
11201 	  }
11202       return;
11203     }
11204 
11205   inst.instruction = THUMB_OP16 (inst.instruction);
11206 
11207   /* PR 10443: Do not silently ignore shifted operands.  */
11208   constraint (inst.operands[1].shifted,
11209 	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11210 
11211   if (inst.operands[1].isreg)
11212     {
11213       if (Rn < 8 && Rm < 8)
11214 	{
11215 	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11216 	     since a MOV instruction produces unpredictable results.  */
11217 	  if (inst.instruction == T_OPCODE_MOV_I8)
11218 	    inst.instruction = T_OPCODE_ADD_I3;
11219 	  else
11220 	    inst.instruction = T_OPCODE_CMP_LR;
11221 
11222 	  inst.instruction |= Rn;
11223 	  inst.instruction |= Rm << 3;
11224 	}
11225       else
11226 	{
11227 	  if (inst.instruction == T_OPCODE_MOV_I8)
11228 	    inst.instruction = T_OPCODE_MOV_HR;
11229 	  else
11230 	    inst.instruction = T_OPCODE_CMP_HR;
11231 	  do_t_cpy ();
11232 	}
11233     }
11234   else
11235     {
11236       constraint (Rn > 7,
11237 		  _("only lo regs allowed with immediate"));
11238       inst.instruction |= Rn << 8;
11239       inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11240     }
11241 }
11242 
11243 static void
11244 do_t_mov16 (void)
11245 {
11246   unsigned Rd;
11247   bfd_vma imm;
11248   bfd_boolean top;
11249 
11250   top = (inst.instruction & 0x00800000) != 0;
11251   if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11252     {
11253       constraint (top, _(":lower16: not allowed in this instruction"));
11254       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11255     }
11256   else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11257     {
11258       constraint (!top, _(":upper16: not allowed in this instruction"));
11259       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11260     }
11261 
11262   Rd = inst.operands[0].reg;
11263   reject_bad_reg (Rd);
11264 
11265   inst.instruction |= Rd << 8;
11266   if (inst.reloc.type == BFD_RELOC_UNUSED)
11267     {
11268       imm = inst.reloc.exp.X_add_number;
11269       inst.instruction |= (imm & 0xf000) << 4;
11270       inst.instruction |= (imm & 0x0800) << 15;
11271       inst.instruction |= (imm & 0x0700) << 4;
11272       inst.instruction |= (imm & 0x00ff);
11273     }
11274 }
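/* Illustrative note (not part of the original source): the four ORs in
   do_t_mov16 above scatter the 16-bit value into the Thumb-2 MOVW/MOVT
   immediate fields, assuming the usual imm4:i:imm3:imm8 layout, i.e.:

     imm4 = bits 15..12 of imm -> insn bits 19..16
     i    = bit  11     of imm -> insn bit  26
     imm3 = bits 10..8  of imm -> insn bits 14..12
     imm8 = bits  7..0  of imm -> insn bits  7..0  */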
11275 
11276 static void
11277 do_t_mvn_tst (void)
11278 {
11279   unsigned Rn, Rm;
11280 
11281   Rn = inst.operands[0].reg;
11282   Rm = inst.operands[1].reg;
11283 
11284   if (inst.instruction == T_MNEM_cmp
11285       || inst.instruction == T_MNEM_cmn)
11286     constraint (Rn == REG_PC, BAD_PC);
11287   else
11288     reject_bad_reg (Rn);
11289   reject_bad_reg (Rm);
11290 
11291   if (unified_syntax)
11292     {
11293       int r0off = (inst.instruction == T_MNEM_mvn
11294 		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11295       bfd_boolean narrow;
11296 
11297       if (inst.size_req == 4
11298 	  || inst.instruction > 0xffff
11299 	  || inst.operands[1].shifted
11300 	  || Rn > 7 || Rm > 7)
11301 	narrow = FALSE;
11302       else if (inst.instruction == T_MNEM_cmn)
11303 	narrow = TRUE;
11304       else if (THUMB_SETS_FLAGS (inst.instruction))
11305 	narrow = !in_it_block ();
11306       else
11307 	narrow = in_it_block ();
11308 
11309       if (!inst.operands[1].isreg)
11310 	{
11311 	  /* For an immediate, we always generate a 32-bit opcode;
11312 	     section relaxation will shrink it later if possible.  */
11313 	  if (inst.instruction < 0xffff)
11314 	    inst.instruction = THUMB_OP32 (inst.instruction);
11315 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11316 	  inst.instruction |= Rn << r0off;
11317 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11318 	}
11319       else
11320 	{
11321 	  /* See if we can do this with a 16-bit instruction.  */
11322 	  if (narrow)
11323 	    {
11324 	      inst.instruction = THUMB_OP16 (inst.instruction);
11325 	      inst.instruction |= Rn;
11326 	      inst.instruction |= Rm << 3;
11327 	    }
11328 	  else
11329 	    {
11330 	      constraint (inst.operands[1].shifted
11331 			  && inst.operands[1].immisreg,
11332 			  _("shift must be constant"));
11333 	      if (inst.instruction < 0xffff)
11334 		inst.instruction = THUMB_OP32 (inst.instruction);
11335 	      inst.instruction |= Rn << r0off;
11336 	      encode_thumb32_shifted_operand (1);
11337 	    }
11338 	}
11339     }
11340   else
11341     {
11342       constraint (inst.instruction > 0xffff
11343 		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11344       constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11345 		  _("unshifted register required"));
11346       constraint (Rn > 7 || Rm > 7,
11347 		  BAD_HIREG);
11348 
11349       inst.instruction = THUMB_OP16 (inst.instruction);
11350       inst.instruction |= Rn;
11351       inst.instruction |= Rm << 3;
11352     }
11353 }
11354 
11355 static void
11356 do_t_mrs (void)
11357 {
11358   unsigned Rd;
11359 
11360   if (do_vfp_nsyn_mrs () == SUCCESS)
11361     return;
11362 
11363   Rd = inst.operands[0].reg;
11364   reject_bad_reg (Rd);
11365   inst.instruction |= Rd << 8;
11366 
11367   if (inst.operands[1].isreg)
11368     {
11369       unsigned br = inst.operands[1].reg;
11370       if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11371 	as_bad (_("bad register for mrs"));
11372 
11373       inst.instruction |= br & (0xf << 16);
11374       inst.instruction |= (br & 0x300) >> 4;
11375       inst.instruction |= (br & SPSR_BIT) >> 2;
11376     }
11377   else
11378     {
11379       int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11380 
11381       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11382 	{
11383 	  /* PR gas/12698:  The constraint is only applied for m_profile.
11384 	     If the user has specified -march=all, we want to ignore it as
11385 	     we are building for any CPU type, including non-m variants.  */
11386 	  bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11387 	  constraint ((flags != 0) && m_profile, _("selected processor does "
11388 						   "not support requested special purpose register"));
11389 	}
11390       else
11391 	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11392 	   devices).  */
11393 	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11394 		    _("'APSR', 'CPSR' or 'SPSR' expected"));
11395 
11396       inst.instruction |= (flags & SPSR_BIT) >> 2;
11397       inst.instruction |= inst.operands[1].imm & 0xff;
11398       inst.instruction |= 0xf0000;
11399     }
11400 }
11401 
11402 static void
11403 do_t_msr (void)
11404 {
11405   int flags;
11406   unsigned Rn;
11407 
11408   if (do_vfp_nsyn_msr () == SUCCESS)
11409     return;
11410 
11411   constraint (!inst.operands[1].isreg,
11412 	      _("Thumb encoding does not support an immediate here"));
11413 
11414   if (inst.operands[0].isreg)
11415     flags = (int)(inst.operands[0].reg);
11416   else
11417     flags = inst.operands[0].imm;
11418 
11419   if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11420     {
11421       int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11422 
11423       /* PR gas/12698:  The constraint is only applied for m_profile.
11424          If the user has specified -march=all, we want to ignore it as
11425          we are building for any CPU type, including non-m variants.  */
11426       bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11427       constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11428            && (bits & ~(PSR_s | PSR_f)) != 0)
11429           || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11430               && bits != PSR_f)) && m_profile,
11431           _("selected processor does not support requested special "
11432             "purpose register"));
11433     }
11434   else
11435      constraint ((flags & 0xff) != 0, _("selected processor does not support "
11436 		 "requested special purpose register"));
11437 
11438   Rn = inst.operands[1].reg;
11439   reject_bad_reg (Rn);
11440 
11441   inst.instruction |= (flags & SPSR_BIT) >> 2;
11442   inst.instruction |= (flags & 0xf0000) >> 8;
11443   inst.instruction |= (flags & 0x300) >> 4;
11444   inst.instruction |= (flags & 0xff);
11445   inst.instruction |= Rn << 16;
11446 }
11447 
11448 static void
11449 do_t_mul (void)
11450 {
11451   bfd_boolean narrow;
11452   unsigned Rd, Rn, Rm;
11453 
11454   if (!inst.operands[2].present)
11455     inst.operands[2].reg = inst.operands[0].reg;
11456 
11457   Rd = inst.operands[0].reg;
11458   Rn = inst.operands[1].reg;
11459   Rm = inst.operands[2].reg;
11460 
11461   if (unified_syntax)
11462     {
11463       if (inst.size_req == 4
11464 	  || (Rd != Rn
11465 	      && Rd != Rm)
11466 	  || Rn > 7
11467 	  || Rm > 7)
11468 	narrow = FALSE;
11469       else if (inst.instruction == T_MNEM_muls)
11470 	narrow = !in_it_block ();
11471       else
11472 	narrow = in_it_block ();
11473     }
11474   else
11475     {
11476       constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11477       constraint (Rn > 7 || Rm > 7,
11478 		  BAD_HIREG);
11479       narrow = TRUE;
11480     }
11481 
11482   if (narrow)
11483     {
11484       /* 16-bit MULS/Conditional MUL.  */
11485       inst.instruction = THUMB_OP16 (inst.instruction);
11486       inst.instruction |= Rd;
11487 
11488       if (Rd == Rn)
11489 	inst.instruction |= Rm << 3;
11490       else if (Rd == Rm)
11491 	inst.instruction |= Rn << 3;
11492       else
11493 	constraint (1, _("dest must overlap one source register"));
11494     }
11495   else
11496     {
11497       constraint (inst.instruction != T_MNEM_mul,
11498 		  _("Thumb-2 MUL must not set flags"));
11499       /* 32-bit MUL.  */
11500       inst.instruction = THUMB_OP32 (inst.instruction);
11501       inst.instruction |= Rd << 8;
11502       inst.instruction |= Rn << 16;
11503       inst.instruction |= Rm << 0;
11504 
11505       reject_bad_reg (Rd);
11506       reject_bad_reg (Rn);
11507       reject_bad_reg (Rm);
11508     }
11509 }
11510 
11511 static void
11512 do_t_mull (void)
11513 {
11514   unsigned RdLo, RdHi, Rn, Rm;
11515 
11516   RdLo = inst.operands[0].reg;
11517   RdHi = inst.operands[1].reg;
11518   Rn = inst.operands[2].reg;
11519   Rm = inst.operands[3].reg;
11520 
11521   reject_bad_reg (RdLo);
11522   reject_bad_reg (RdHi);
11523   reject_bad_reg (Rn);
11524   reject_bad_reg (Rm);
11525 
11526   inst.instruction |= RdLo << 12;
11527   inst.instruction |= RdHi << 8;
11528   inst.instruction |= Rn << 16;
11529   inst.instruction |= Rm;
11530 
11531   if (RdLo == RdHi)
11532     as_tsktsk (_("rdhi and rdlo must be different"));
11533 }
11534 
11535 static void
11536 do_t_nop (void)
11537 {
11538   set_it_insn_type (NEUTRAL_IT_INSN);
11539 
11540   if (unified_syntax)
11541     {
11542       if (inst.size_req == 4 || inst.operands[0].imm > 15)
11543 	{
11544 	  inst.instruction = THUMB_OP32 (inst.instruction);
11545 	  inst.instruction |= inst.operands[0].imm;
11546 	}
11547       else
11548 	{
11549 	  /* PR9722: Check for Thumb2 availability before
11550 	     generating a thumb2 nop instruction.  */
11551 	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11552 	    {
11553 	      inst.instruction = THUMB_OP16 (inst.instruction);
11554 	      inst.instruction |= inst.operands[0].imm << 4;
11555 	    }
11556 	  else
11557 	    inst.instruction = 0x46c0;
11558 	}
11559     }
11560   else
11561     {
11562       constraint (inst.operands[0].present,
11563 		  _("Thumb does not support NOP with hints"));
11564       inst.instruction = 0x46c0;
11565     }
11566 }
11567 
11568 static void
11569 do_t_neg (void)
11570 {
11571   if (unified_syntax)
11572     {
11573       bfd_boolean narrow;
11574 
11575       if (THUMB_SETS_FLAGS (inst.instruction))
11576 	narrow = !in_it_block ();
11577       else
11578 	narrow = in_it_block ();
11579       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11580 	narrow = FALSE;
11581       if (inst.size_req == 4)
11582 	narrow = FALSE;
11583 
11584       if (!narrow)
11585 	{
11586 	  inst.instruction = THUMB_OP32 (inst.instruction);
11587 	  inst.instruction |= inst.operands[0].reg << 8;
11588 	  inst.instruction |= inst.operands[1].reg << 16;
11589 	}
11590       else
11591 	{
11592 	  inst.instruction = THUMB_OP16 (inst.instruction);
11593 	  inst.instruction |= inst.operands[0].reg;
11594 	  inst.instruction |= inst.operands[1].reg << 3;
11595 	}
11596     }
11597   else
11598     {
11599       constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11600 		  BAD_HIREG);
11601       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11602 
11603       inst.instruction = THUMB_OP16 (inst.instruction);
11604       inst.instruction |= inst.operands[0].reg;
11605       inst.instruction |= inst.operands[1].reg << 3;
11606     }
11607 }
11608 
11609 static void
11610 do_t_orn (void)
11611 {
11612   unsigned Rd, Rn;
11613 
11614   Rd = inst.operands[0].reg;
11615   Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11616 
11617   reject_bad_reg (Rd);
11618   /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
11619   reject_bad_reg (Rn);
11620 
11621   inst.instruction |= Rd << 8;
11622   inst.instruction |= Rn << 16;
11623 
11624   if (!inst.operands[2].isreg)
11625     {
11626       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11627       inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11628     }
11629   else
11630     {
11631       unsigned Rm;
11632 
11633       Rm = inst.operands[2].reg;
11634       reject_bad_reg (Rm);
11635 
11636       constraint (inst.operands[2].shifted
11637 		  && inst.operands[2].immisreg,
11638 		  _("shift must be constant"));
11639       encode_thumb32_shifted_operand (2);
11640     }
11641 }
11642 
11643 static void
11644 do_t_pkhbt (void)
11645 {
11646   unsigned Rd, Rn, Rm;
11647 
11648   Rd = inst.operands[0].reg;
11649   Rn = inst.operands[1].reg;
11650   Rm = inst.operands[2].reg;
11651 
11652   reject_bad_reg (Rd);
11653   reject_bad_reg (Rn);
11654   reject_bad_reg (Rm);
11655 
11656   inst.instruction |= Rd << 8;
11657   inst.instruction |= Rn << 16;
11658   inst.instruction |= Rm;
11659   if (inst.operands[3].present)
11660     {
11661       unsigned int val = inst.reloc.exp.X_add_number;
11662       constraint (inst.reloc.exp.X_op != O_constant,
11663 		  _("expression too complex"));
11664       inst.instruction |= (val & 0x1c) << 10;
11665       inst.instruction |= (val & 0x03) << 6;
11666     }
11667 }
11668 
11669 static void
11670 do_t_pkhtb (void)
11671 {
11672   if (!inst.operands[3].present)
11673     {
11674       unsigned Rtmp;
11675 
11676       inst.instruction &= ~0x00000020;
11677 
11678       /* PR 10168.  Swap the Rm and Rn registers.  */
11679       Rtmp = inst.operands[1].reg;
11680       inst.operands[1].reg = inst.operands[2].reg;
11681       inst.operands[2].reg = Rtmp;
11682     }
11683   do_t_pkhbt ();
11684 }
11685 
11686 static void
11687 do_t_pld (void)
11688 {
11689   if (inst.operands[0].immisreg)
11690     reject_bad_reg (inst.operands[0].imm);
11691 
11692   encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11693 }
11694 
11695 static void
11696 do_t_push_pop (void)
11697 {
11698   unsigned mask;
11699 
11700   constraint (inst.operands[0].writeback,
11701 	      _("push/pop do not support {reglist}^"));
11702   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11703 	      _("expression too complex"));
11704 
11705   mask = inst.operands[0].imm;
11706   if ((mask & ~0xff) == 0)
11707     inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11708   else if ((inst.instruction == T_MNEM_push
11709 	    && (mask & ~0xff) == 1 << REG_LR)
11710 	   || (inst.instruction == T_MNEM_pop
11711 	       && (mask & ~0xff) == 1 << REG_PC))
11712     {
11713       inst.instruction = THUMB_OP16 (inst.instruction);
11714       inst.instruction |= THUMB_PP_PC_LR;
11715       inst.instruction |= mask & 0xff;
11716     }
11717   else if (unified_syntax)
11718     {
11719       inst.instruction = THUMB_OP32 (inst.instruction);
11720       encode_thumb2_ldmstm (13, mask, TRUE);
11721     }
11722   else
11723     {
11724       inst.error = _("invalid register list to push/pop instruction");
11725       return;
11726     }
11727 }
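/* Illustrative note (not part of the original source), showing how the
   mask tests above pick an encoding:

     push {r0-r7}   -> mask fits in 0xff, 16-bit THUMB_OP16 form
     push {r4, lr}  -> only LR above r7, 16-bit form with THUMB_PP_PC_LR
     pop  {r0, pc}  -> only PC above r7, 16-bit form with THUMB_PP_PC_LR
     push {r4, r8}  -> any other high register falls through to the 32-bit
                       Thumb-2 path via encode_thumb2_ldmstm (unified
                       syntax only; pre-UAL syntax is rejected).  */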
11728 
11729 static void
11730 do_t_rbit (void)
11731 {
11732   unsigned Rd, Rm;
11733 
11734   Rd = inst.operands[0].reg;
11735   Rm = inst.operands[1].reg;
11736 
11737   reject_bad_reg (Rd);
11738   reject_bad_reg (Rm);
11739 
11740   inst.instruction |= Rd << 8;
11741   inst.instruction |= Rm << 16;
11742   inst.instruction |= Rm;
11743 }
11744 
11745 static void
11746 do_t_rev (void)
11747 {
11748   unsigned Rd, Rm;
11749 
11750   Rd = inst.operands[0].reg;
11751   Rm = inst.operands[1].reg;
11752 
11753   reject_bad_reg (Rd);
11754   reject_bad_reg (Rm);
11755 
11756   if (Rd <= 7 && Rm <= 7
11757       && inst.size_req != 4)
11758     {
11759       inst.instruction = THUMB_OP16 (inst.instruction);
11760       inst.instruction |= Rd;
11761       inst.instruction |= Rm << 3;
11762     }
11763   else if (unified_syntax)
11764     {
11765       inst.instruction = THUMB_OP32 (inst.instruction);
11766       inst.instruction |= Rd << 8;
11767       inst.instruction |= Rm << 16;
11768       inst.instruction |= Rm;
11769     }
11770   else
11771     inst.error = BAD_HIREG;
11772 }
11773 
11774 static void
11775 do_t_rrx (void)
11776 {
11777   unsigned Rd, Rm;
11778 
11779   Rd = inst.operands[0].reg;
11780   Rm = inst.operands[1].reg;
11781 
11782   reject_bad_reg (Rd);
11783   reject_bad_reg (Rm);
11784 
11785   inst.instruction |= Rd << 8;
11786   inst.instruction |= Rm;
11787 }
11788 
11789 static void
11790 do_t_rsb (void)
11791 {
11792   unsigned Rd, Rs;
11793 
11794   Rd = inst.operands[0].reg;
11795   Rs = (inst.operands[1].present
11796 	? inst.operands[1].reg    /* Rd, Rs, foo */
11797 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11798 
11799   reject_bad_reg (Rd);
11800   reject_bad_reg (Rs);
11801   if (inst.operands[2].isreg)
11802     reject_bad_reg (inst.operands[2].reg);
11803 
11804   inst.instruction |= Rd << 8;
11805   inst.instruction |= Rs << 16;
11806   if (!inst.operands[2].isreg)
11807     {
11808       bfd_boolean narrow;
11809 
11810       if ((inst.instruction & 0x00100000) != 0)
11811 	narrow = !in_it_block ();
11812       else
11813 	narrow = in_it_block ();
11814 
11815       if (Rd > 7 || Rs > 7)
11816 	narrow = FALSE;
11817 
11818       if (inst.size_req == 4 || !unified_syntax)
11819 	narrow = FALSE;
11820 
11821       if (inst.reloc.exp.X_op != O_constant
11822 	  || inst.reloc.exp.X_add_number != 0)
11823 	narrow = FALSE;
11824 
11825       /* Turn rsb #0 into 16-bit neg.  We should probably do this via
11826          relaxation, but it doesn't seem worth the hassle.  */
11827       if (narrow)
11828 	{
11829 	  inst.reloc.type = BFD_RELOC_UNUSED;
11830 	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
11831 	  inst.instruction |= Rs << 3;
11832 	  inst.instruction |= Rd;
11833 	}
11834       else
11835 	{
11836 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11837 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11838 	}
11839     }
11840   else
11841     encode_thumb32_shifted_operand (2);
11842 }
11843 
11844 static void
11845 do_t_setend (void)
11846 {
11847   if (warn_on_deprecated
11848       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11849       as_warn (_("setend use is deprecated for ARMv8"));
11850 
11851   set_it_insn_type (OUTSIDE_IT_INSN);
11852   if (inst.operands[0].imm)
11853     inst.instruction |= 0x8;
11854 }
11855 
11856 static void
11857 do_t_shift (void)
11858 {
11859   if (!inst.operands[1].present)
11860     inst.operands[1].reg = inst.operands[0].reg;
11861 
11862   if (unified_syntax)
11863     {
11864       bfd_boolean narrow;
11865       int shift_kind;
11866 
11867       switch (inst.instruction)
11868 	{
11869 	case T_MNEM_asr:
11870 	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11871 	case T_MNEM_lsl:
11872 	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11873 	case T_MNEM_lsr:
11874 	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11875 	case T_MNEM_ror:
11876 	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11877 	default: abort ();
11878 	}
11879 
11880       if (THUMB_SETS_FLAGS (inst.instruction))
11881 	narrow = !in_it_block ();
11882       else
11883 	narrow = in_it_block ();
11884       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11885 	narrow = FALSE;
11886       if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11887 	narrow = FALSE;
11888       if (inst.operands[2].isreg
11889 	  && (inst.operands[1].reg != inst.operands[0].reg
11890 	      || inst.operands[2].reg > 7))
11891 	narrow = FALSE;
11892       if (inst.size_req == 4)
11893 	narrow = FALSE;
11894 
11895       reject_bad_reg (inst.operands[0].reg);
11896       reject_bad_reg (inst.operands[1].reg);
11897 
11898       if (!narrow)
11899 	{
11900 	  if (inst.operands[2].isreg)
11901 	    {
11902 	      reject_bad_reg (inst.operands[2].reg);
11903 	      inst.instruction = THUMB_OP32 (inst.instruction);
11904 	      inst.instruction |= inst.operands[0].reg << 8;
11905 	      inst.instruction |= inst.operands[1].reg << 16;
11906 	      inst.instruction |= inst.operands[2].reg;
11907 
11908 	      /* PR 12854: Error on extraneous shifts.  */
11909 	      constraint (inst.operands[2].shifted,
11910 			  _("extraneous shift as part of operand to shift insn"));
11911 	    }
11912 	  else
11913 	    {
11914 	      inst.operands[1].shifted = 1;
11915 	      inst.operands[1].shift_kind = shift_kind;
11916 	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11917 					     ? T_MNEM_movs : T_MNEM_mov);
11918 	      inst.instruction |= inst.operands[0].reg << 8;
11919 	      encode_thumb32_shifted_operand (1);
11920 	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
11921 	      inst.reloc.type = BFD_RELOC_UNUSED;
11922 	    }
11923 	}
11924       else
11925 	{
11926 	  if (inst.operands[2].isreg)
11927 	    {
11928 	      switch (shift_kind)
11929 		{
11930 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11931 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11932 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11933 		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11934 		default: abort ();
11935 		}
11936 
11937 	      inst.instruction |= inst.operands[0].reg;
11938 	      inst.instruction |= inst.operands[2].reg << 3;
11939 
11940 	      /* PR 12854: Error on extraneous shifts.  */
11941 	      constraint (inst.operands[2].shifted,
11942 			  _("extraneous shift as part of operand to shift insn"));
11943 	    }
11944 	  else
11945 	    {
11946 	      switch (shift_kind)
11947 		{
11948 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11949 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11950 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11951 		default: abort ();
11952 		}
11953 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11954 	      inst.instruction |= inst.operands[0].reg;
11955 	      inst.instruction |= inst.operands[1].reg << 3;
11956 	    }
11957 	}
11958     }
11959   else
11960     {
11961       constraint (inst.operands[0].reg > 7
11962 		  || inst.operands[1].reg > 7, BAD_HIREG);
11963       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11964 
11965       if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
11966 	{
11967 	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
11968 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
11969 		      _("source1 and dest must be same register"));
11970 
11971 	  switch (inst.instruction)
11972 	    {
11973 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11974 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11975 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11976 	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11977 	    default: abort ();
11978 	    }
11979 
11980 	  inst.instruction |= inst.operands[0].reg;
11981 	  inst.instruction |= inst.operands[2].reg << 3;
11982 
11983 	  /* PR 12854: Error on extraneous shifts.  */
11984 	  constraint (inst.operands[2].shifted,
11985 		      _("extraneous shift as part of operand to shift insn"));
11986 	}
11987       else
11988 	{
11989 	  switch (inst.instruction)
11990 	    {
11991 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11992 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11993 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11994 	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11995 	    default: abort ();
11996 	    }
11997 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11998 	  inst.instruction |= inst.operands[0].reg;
11999 	  inst.instruction |= inst.operands[1].reg << 3;
12000 	}
12001     }
12002 }
12003 
12004 static void
12005 do_t_simd (void)
12006 {
12007   unsigned Rd, Rn, Rm;
12008 
12009   Rd = inst.operands[0].reg;
12010   Rn = inst.operands[1].reg;
12011   Rm = inst.operands[2].reg;
12012 
12013   reject_bad_reg (Rd);
12014   reject_bad_reg (Rn);
12015   reject_bad_reg (Rm);
12016 
12017   inst.instruction |= Rd << 8;
12018   inst.instruction |= Rn << 16;
12019   inst.instruction |= Rm;
12020 }
12021 
12022 static void
12023 do_t_simd2 (void)
12024 {
12025   unsigned Rd, Rn, Rm;
12026 
12027   Rd = inst.operands[0].reg;
12028   Rm = inst.operands[1].reg;
12029   Rn = inst.operands[2].reg;
12030 
12031   reject_bad_reg (Rd);
12032   reject_bad_reg (Rn);
12033   reject_bad_reg (Rm);
12034 
12035   inst.instruction |= Rd << 8;
12036   inst.instruction |= Rn << 16;
12037   inst.instruction |= Rm;
12038 }
12039 
12040 static void
12041 do_t_smc (void)
12042 {
12043   unsigned int value = inst.reloc.exp.X_add_number;
12044   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12045 	      _("SMC is not permitted on this architecture"));
12046   constraint (inst.reloc.exp.X_op != O_constant,
12047 	      _("expression too complex"));
12048   inst.reloc.type = BFD_RELOC_UNUSED;
12049   inst.instruction |= (value & 0xf000) >> 12;
12050   inst.instruction |= (value & 0x0ff0);
12051   inst.instruction |= (value & 0x000f) << 16;
12052 }
12053 
12054 static void
12055 do_t_hvc (void)
12056 {
12057   unsigned int value = inst.reloc.exp.X_add_number;
12058 
12059   inst.reloc.type = BFD_RELOC_UNUSED;
12060   inst.instruction |= (value & 0x0fff);
12061   inst.instruction |= (value & 0xf000) << 4;
12062 }
12063 
12064 static void
12065 do_t_ssat_usat (int bias)
12066 {
12067   unsigned Rd, Rn;
12068 
12069   Rd = inst.operands[0].reg;
12070   Rn = inst.operands[2].reg;
12071 
12072   reject_bad_reg (Rd);
12073   reject_bad_reg (Rn);
12074 
12075   inst.instruction |= Rd << 8;
12076   inst.instruction |= inst.operands[1].imm - bias;
12077   inst.instruction |= Rn << 16;
12078 
12079   if (inst.operands[3].present)
12080     {
12081       offsetT shift_amount = inst.reloc.exp.X_add_number;
12082 
12083       inst.reloc.type = BFD_RELOC_UNUSED;
12084 
12085       constraint (inst.reloc.exp.X_op != O_constant,
12086 		  _("expression too complex"));
12087 
12088       if (shift_amount != 0)
12089 	{
12090 	  constraint (shift_amount > 31,
12091 		      _("shift expression is too large"));
12092 
12093 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
12094 	    inst.instruction |= 0x00200000;  /* sh bit.  */
12095 
12096 	  inst.instruction |= (shift_amount & 0x1c) << 10;
12097 	  inst.instruction |= (shift_amount & 0x03) << 6;
12098 	}
12099     }
12100 }
12101 
12102 static void
12103 do_t_ssat (void)
12104 {
12105   do_t_ssat_usat (1);
12106 }
12107 
12108 static void
12109 do_t_ssat16 (void)
12110 {
12111   unsigned Rd, Rn;
12112 
12113   Rd = inst.operands[0].reg;
12114   Rn = inst.operands[2].reg;
12115 
12116   reject_bad_reg (Rd);
12117   reject_bad_reg (Rn);
12118 
12119   inst.instruction |= Rd << 8;
12120   inst.instruction |= inst.operands[1].imm - 1;
12121   inst.instruction |= Rn << 16;
12122 }
12123 
12124 static void
12125 do_t_strex (void)
12126 {
12127   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12128 	      || inst.operands[2].postind || inst.operands[2].writeback
12129 	      || inst.operands[2].immisreg || inst.operands[2].shifted
12130 	      || inst.operands[2].negative,
12131 	      BAD_ADDR_MODE);
12132 
12133   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12134 
12135   inst.instruction |= inst.operands[0].reg << 8;
12136   inst.instruction |= inst.operands[1].reg << 12;
12137   inst.instruction |= inst.operands[2].reg << 16;
12138   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12139 }
12140 
12141 static void
12142 do_t_strexd (void)
12143 {
12144   if (!inst.operands[2].present)
12145     inst.operands[2].reg = inst.operands[1].reg + 1;
12146 
12147   constraint (inst.operands[0].reg == inst.operands[1].reg
12148 	      || inst.operands[0].reg == inst.operands[2].reg
12149 	      || inst.operands[0].reg == inst.operands[3].reg,
12150 	      BAD_OVERLAP);
12151 
12152   inst.instruction |= inst.operands[0].reg;
12153   inst.instruction |= inst.operands[1].reg << 12;
12154   inst.instruction |= inst.operands[2].reg << 8;
12155   inst.instruction |= inst.operands[3].reg << 16;
12156 }
12157 
12158 static void
12159 do_t_sxtah (void)
12160 {
12161   unsigned Rd, Rn, Rm;
12162 
12163   Rd = inst.operands[0].reg;
12164   Rn = inst.operands[1].reg;
12165   Rm = inst.operands[2].reg;
12166 
12167   reject_bad_reg (Rd);
12168   reject_bad_reg (Rn);
12169   reject_bad_reg (Rm);
12170 
12171   inst.instruction |= Rd << 8;
12172   inst.instruction |= Rn << 16;
12173   inst.instruction |= Rm;
12174   inst.instruction |= inst.operands[3].imm << 4;
12175 }
12176 
12177 static void
12178 do_t_sxth (void)
12179 {
12180   unsigned Rd, Rm;
12181 
12182   Rd = inst.operands[0].reg;
12183   Rm = inst.operands[1].reg;
12184 
12185   reject_bad_reg (Rd);
12186   reject_bad_reg (Rm);
12187 
12188   if (inst.instruction <= 0xffff
12189       && inst.size_req != 4
12190       && Rd <= 7 && Rm <= 7
12191       && (!inst.operands[2].present || inst.operands[2].imm == 0))
12192     {
12193       inst.instruction = THUMB_OP16 (inst.instruction);
12194       inst.instruction |= Rd;
12195       inst.instruction |= Rm << 3;
12196     }
12197   else if (unified_syntax)
12198     {
12199       if (inst.instruction <= 0xffff)
12200 	inst.instruction = THUMB_OP32 (inst.instruction);
12201       inst.instruction |= Rd << 8;
12202       inst.instruction |= Rm;
12203       inst.instruction |= inst.operands[2].imm << 4;
12204     }
12205   else
12206     {
12207       constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12208 		  _("Thumb encoding does not support rotation"));
12209       constraint (1, BAD_HIREG);
12210     }
12211 }
12212 
12213 static void
12214 do_t_swi (void)
12215 {
12216   /* We have to do the following check manually as ARM_EXT_OS only applies
12217      to ARM_EXT_V6M.  */
12218   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12219     {
12220       if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12221 	  /* This only applies to the v6m, however, not to later architectures.  */
12222 	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12223 	as_bad (_("SVC is not permitted on this architecture"));
12224       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12225     }
12226 
12227   inst.reloc.type = BFD_RELOC_ARM_SWI;
12228 }
12229 
12230 static void
12231 do_t_tb (void)
12232 {
12233   unsigned Rn, Rm;
12234   int half;
12235 
12236   half = (inst.instruction & 0x10) != 0;
12237   set_it_insn_type_last ();
12238   constraint (inst.operands[0].immisreg,
12239 	      _("instruction requires register index"));
12240 
12241   Rn = inst.operands[0].reg;
12242   Rm = inst.operands[0].imm;
12243 
12244   constraint (Rn == REG_SP, BAD_SP);
12245   reject_bad_reg (Rm);
12246 
12247   constraint (!half && inst.operands[0].shifted,
12248 	      _("instruction does not allow shifted index"));
12249   inst.instruction |= (Rn << 16) | Rm;
12250 }
12251 
12252 static void
12253 do_t_usat (void)
12254 {
12255   do_t_ssat_usat (0);
12256 }
12257 
12258 static void
12259 do_t_usat16 (void)
12260 {
12261   unsigned Rd, Rn;
12262 
12263   Rd = inst.operands[0].reg;
12264   Rn = inst.operands[2].reg;
12265 
12266   reject_bad_reg (Rd);
12267   reject_bad_reg (Rn);
12268 
12269   inst.instruction |= Rd << 8;
12270   inst.instruction |= inst.operands[1].imm;
12271   inst.instruction |= Rn << 16;
12272 }
12273 
12274 /* Neon instruction encoder helpers.  */
12275 
12276 /* Encodings for the different types for various Neon opcodes.  */
12277 
12278 /* An "invalid" code for the following tables.  */
12279 #define N_INV -1u
12280 
12281 struct neon_tab_entry
12282 {
12283   unsigned integer;
12284   unsigned float_or_poly;
12285   unsigned scalar_or_imm;
12286 };
12287 
12288 /* Map overloaded Neon opcodes to their respective encodings.  */
12289 #define NEON_ENC_TAB					\
12290   X(vabd,	0x0000700, 0x1200d00, N_INV),		\
12291   X(vmax,	0x0000600, 0x0000f00, N_INV),		\
12292   X(vmin,	0x0000610, 0x0200f00, N_INV),		\
12293   X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
12294   X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
12295   X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
12296   X(vadd,	0x0000800, 0x0000d00, N_INV),		\
12297   X(vsub,	0x1000800, 0x0200d00, N_INV),		\
12298   X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
12299   X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
12300   X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
12301   /* Register variants of the following two instructions are encoded as
12302      vcge / vcgt with the operands reversed.  */  	\
12303   X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
12304   X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
12305   X(vfma,	N_INV, 0x0000c10, N_INV),		\
12306   X(vfms,	N_INV, 0x0200c10, N_INV),		\
12307   X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
12308   X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
12309   X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
12310   X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
12311   X(vmlal,	0x0800800, N_INV,     0x0800240),	\
12312   X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
12313   X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
12314   X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
12315   X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
12316   X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
12317   X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
12318   X(vshl,	0x0000400, N_INV,     0x0800510),	\
12319   X(vqshl,	0x0000410, N_INV,     0x0800710),	\
12320   X(vand,	0x0000110, N_INV,     0x0800030),	\
12321   X(vbic,	0x0100110, N_INV,     0x0800030),	\
12322   X(veor,	0x1000110, N_INV,     N_INV),		\
12323   X(vorn,	0x0300110, N_INV,     0x0800010),	\
12324   X(vorr,	0x0200110, N_INV,     0x0800010),	\
12325   X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
12326   X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
12327   X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
12328   X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
12329   X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
12330   X(vst1,	0x0000000, 0x0800000, N_INV),		\
12331   X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
12332   X(vst2,	0x0000100, 0x0800100, N_INV),		\
12333   X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
12334   X(vst3,	0x0000200, 0x0800200, N_INV),		\
12335   X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
12336   X(vst4,	0x0000300, 0x0800300, N_INV),		\
12337   X(vmovn,	0x1b20200, N_INV,     N_INV),		\
12338   X(vtrn,	0x1b20080, N_INV,     N_INV),		\
12339   X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
12340   X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
12341   X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
12342   X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
12343   X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
12344   X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
12345   X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
12346   X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
12347   X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
12348   X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
12349   X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
12350   X(vseleq,	0xe000a00, N_INV,     N_INV),		\
12351   X(vselvs,	0xe100a00, N_INV,     N_INV),		\
12352   X(vselge,	0xe200a00, N_INV,     N_INV),		\
12353   X(vselgt,	0xe300a00, N_INV,     N_INV),		\
12354   X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
12355   X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
12356   X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
12357   X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
12358   X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
12359   X(aes,	0x3b00300, N_INV,     N_INV),		\
12360   X(sha3op,	0x2000c00, N_INV,     N_INV),		\
12361   X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
12362   X(sha2op,     0x3ba0380, N_INV,     N_INV)
12363 
12364 enum neon_opc
12365 {
12366 #define X(OPC,I,F,S) N_MNEM_##OPC
12367 NEON_ENC_TAB
12368 #undef X
12369 };
12370 
12371 static const struct neon_tab_entry neon_enc_tab[] =
12372 {
12373 #define X(OPC,I,F,S) { (I), (F), (S) }
12374 NEON_ENC_TAB
12375 #undef X
12376 };
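/* Illustrative note (not part of the original source): each NEON_ENC_TAB
   entry above is expanded twice, once per X definition.  For instance the
   vadd entry yields

     N_MNEM_vadd                        in enum neon_opc, and
     { 0x0000800, 0x0000d00, N_INV }    in neon_enc_tab[],

   i.e. the integer and float encodings of VADD, with no scalar/immediate
   form.  */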
12377 
12378 /* Do not use these macros; instead, use NEON_ENCODE defined below.  */
12379 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12380 #define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
12381 #define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12382 #define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12383 #define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12384 #define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12385 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12386 #define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12387 #define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12388 #define NEON_ENC_SINGLE_(X) \
12389   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12390 #define NEON_ENC_DOUBLE_(X) \
12391   ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12392 #define NEON_ENC_FPV8_(X) \
12393   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12394 
12395 #define NEON_ENCODE(type, inst)					\
12396   do								\
12397     {								\
12398       inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
12399       inst.is_neon = 1;						\
12400     }								\
12401   while (0)
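/* Illustrative sketch (not part of the original source) of how NEON_ENCODE
   is meant to be used; a caller might write

     NEON_ENCODE (INTEGER, inst);

   which replaces inst.instruction with the .integer field of the
   neon_enc_tab entry selected by its low 28 bits (the N_MNEM_* index) and
   sets inst.is_neon, so check_neon_suffixes below accepts a type suffix.  */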
12402 
12403 #define check_neon_suffixes						\
12404   do									\
12405     {									\
12406       if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
12407 	{								\
12408 	  as_bad (_("invalid neon suffix for non neon instruction"));	\
12409 	  return;							\
12410 	}								\
12411     }									\
12412   while (0)
12413 
12414 /* Define shapes for instruction operands. The following mnemonic characters
12415    are used in this table:
12416 
12417      F - VFP S<n> register
12418      D - Neon D<n> register
12419      Q - Neon Q<n> register
12420      I - Immediate
12421      S - Scalar
12422      R - ARM register
12423      L - D<n> register list
12424 
12425    This table is used to generate various data:
12426      - enumerations of the form NS_DDR to be used as arguments to
12427        neon_select_shape.
12428      - a table classifying shapes into single, double, quad, mixed.
12429      - a table used to drive neon_select_shape.  */
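/* Illustrative note (not part of the original source): with the S2/S3/S4
   and X helpers defined below, an entry such as

     X(3, (D, D, D), DOUBLE)

   expands to NS_DDD in enum neon_shape, SC_DOUBLE in neon_shape_class[]
   and { 3, { SE_D, SE_D, SE_D } } in neon_shape_tab[].  */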
12430 
12431 #define NEON_SHAPE_DEF			\
12432   X(3, (D, D, D), DOUBLE),		\
12433   X(3, (Q, Q, Q), QUAD),		\
12434   X(3, (D, D, I), DOUBLE),		\
12435   X(3, (Q, Q, I), QUAD),		\
12436   X(3, (D, D, S), DOUBLE),		\
12437   X(3, (Q, Q, S), QUAD),		\
12438   X(2, (D, D), DOUBLE),			\
12439   X(2, (Q, Q), QUAD),			\
12440   X(2, (D, S), DOUBLE),			\
12441   X(2, (Q, S), QUAD),			\
12442   X(2, (D, R), DOUBLE),			\
12443   X(2, (Q, R), QUAD),			\
12444   X(2, (D, I), DOUBLE),			\
12445   X(2, (Q, I), QUAD),			\
12446   X(3, (D, L, D), DOUBLE),		\
12447   X(2, (D, Q), MIXED),			\
12448   X(2, (Q, D), MIXED),			\
12449   X(3, (D, Q, I), MIXED),		\
12450   X(3, (Q, D, I), MIXED),		\
12451   X(3, (Q, D, D), MIXED),		\
12452   X(3, (D, Q, Q), MIXED),		\
12453   X(3, (Q, Q, D), MIXED),		\
12454   X(3, (Q, D, S), MIXED),		\
12455   X(3, (D, Q, S), MIXED),		\
12456   X(4, (D, D, D, I), DOUBLE),		\
12457   X(4, (Q, Q, Q, I), QUAD),		\
12458   X(2, (F, F), SINGLE),			\
12459   X(3, (F, F, F), SINGLE),		\
12460   X(2, (F, I), SINGLE),			\
12461   X(2, (F, D), MIXED),			\
12462   X(2, (D, F), MIXED),			\
12463   X(3, (F, F, I), MIXED),		\
12464   X(4, (R, R, F, F), SINGLE),		\
12465   X(4, (F, F, R, R), SINGLE),		\
12466   X(3, (D, R, R), DOUBLE),		\
12467   X(3, (R, R, D), DOUBLE),		\
12468   X(2, (S, R), SINGLE),			\
12469   X(2, (R, S), SINGLE),			\
12470   X(2, (F, R), SINGLE),			\
12471   X(2, (R, F), SINGLE)
12472 
12473 #define S2(A,B)		NS_##A##B
12474 #define S3(A,B,C)	NS_##A##B##C
12475 #define S4(A,B,C,D)	NS_##A##B##C##D
12476 
12477 #define X(N, L, C) S##N L
12478 
12479 enum neon_shape
12480 {
12481   NEON_SHAPE_DEF,
12482   NS_NULL
12483 };
12484 
12485 #undef X
12486 #undef S2
12487 #undef S3
12488 #undef S4
12489 
12490 enum neon_shape_class
12491 {
12492   SC_SINGLE,
12493   SC_DOUBLE,
12494   SC_QUAD,
12495   SC_MIXED
12496 };
12497 
12498 #define X(N, L, C) SC_##C
12499 
12500 static enum neon_shape_class neon_shape_class[] =
12501 {
12502   NEON_SHAPE_DEF
12503 };
12504 
12505 #undef X
12506 
12507 enum neon_shape_el
12508 {
12509   SE_F,
12510   SE_D,
12511   SE_Q,
12512   SE_I,
12513   SE_S,
12514   SE_R,
12515   SE_L
12516 };
12517 
12518 /* Register widths of above.  */
12519 static unsigned neon_shape_el_size[] =
12520 {
12521   32,
12522   64,
12523   128,
12524   0,
12525   32,
12526   32,
12527   0
12528 };
12529 
12530 struct neon_shape_info
12531 {
12532   unsigned els;
12533   enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12534 };
12535 
12536 #define S2(A,B)		{ SE_##A, SE_##B }
12537 #define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
12538 #define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
12539 
12540 #define X(N, L, C) { N, S##N L }
12541 
12542 static struct neon_shape_info neon_shape_tab[] =
12543 {
12544   NEON_SHAPE_DEF
12545 };
12546 
12547 #undef X
12548 #undef S2
12549 #undef S3
12550 #undef S4
12551 
12552 /* Bit masks used when type-checking instructions.
12553    'N_EQK' means the type must be the same as (or based in some way on) the key
12554    type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
12555    set, various other bits can be set as well in order to modify the meaning of
12556    the type constraint.  */
12557 
12558 enum neon_type_mask
12559 {
12560   N_S8   = 0x0000001,
12561   N_S16  = 0x0000002,
12562   N_S32  = 0x0000004,
12563   N_S64  = 0x0000008,
12564   N_U8   = 0x0000010,
12565   N_U16  = 0x0000020,
12566   N_U32  = 0x0000040,
12567   N_U64  = 0x0000080,
12568   N_I8   = 0x0000100,
12569   N_I16  = 0x0000200,
12570   N_I32  = 0x0000400,
12571   N_I64  = 0x0000800,
12572   N_8    = 0x0001000,
12573   N_16   = 0x0002000,
12574   N_32   = 0x0004000,
12575   N_64   = 0x0008000,
12576   N_P8   = 0x0010000,
12577   N_P16  = 0x0020000,
12578   N_F16  = 0x0040000,
12579   N_F32  = 0x0080000,
12580   N_F64  = 0x0100000,
12581   N_P64	 = 0x0200000,
12582   N_KEY  = 0x1000000, /* Key element (main type specifier).  */
12583   N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
12584   N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
12585   N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
12586   N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
12587   N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
12588   N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
12589   N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
12590   N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
12591   N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
12592   N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
12593   N_UTYP = 0,
12594   N_MAX_NONSPECIAL = N_P64
12595 };
12596 
12597 #define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12598 
12599 #define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12600 #define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12601 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12602 #define N_SUF_32   (N_SU_32 | N_F32)
12603 #define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
12604 #define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
12605 
12606 /* Pass this as the first type argument to neon_check_type to ignore types
12607    altogether.  */
12608 #define N_IGNORE_TYPE (N_KEY | N_EQK)
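
/* As an illustration of how these masks are typically combined: a call such
   as the one used for the dyadic integer instructions further down,

     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);

   marks the third operand as the key and allows it to be any of the
   signed/unsigned 8, 16 or 32-bit types, while constraining the first two
   operands (N_EQK) to have the same type and size as that key.  */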
12609 
12610 /* Select a "shape" for the current instruction (describing register types or
12611    sizes) from a list of alternatives.  Return NS_NULL if the current
12612    instruction doesn't fit.  For non-polymorphic shapes, checking is usually
12613    done as part of operand parsing, so this function doesn't need to be
12614    called.  Shapes should be listed in order of decreasing length.  */
12615 
12616 static enum neon_shape
12617 neon_select_shape (enum neon_shape shape, ...)
12618 {
12619   va_list ap;
12620   enum neon_shape first_shape = shape;
12621 
12622   /* Fix missing optional operands. FIXME: we don't know at this point how
12623      many arguments we should have, so this makes the assumption that we have
12624      > 1. This is true of all current Neon opcodes, I think, but may not be
12625      true in the future.  */
12626   if (!inst.operands[1].present)
12627     inst.operands[1] = inst.operands[0];
12628 
12629   va_start (ap, shape);
12630 
12631   for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12632     {
12633       unsigned j;
12634       int matches = 1;
12635 
12636       for (j = 0; j < neon_shape_tab[shape].els; j++)
12637         {
12638           if (!inst.operands[j].present)
12639             {
12640               matches = 0;
12641               break;
12642             }
12643 
12644           switch (neon_shape_tab[shape].el[j])
12645             {
12646             case SE_F:
12647               if (!(inst.operands[j].isreg
12648                     && inst.operands[j].isvec
12649                     && inst.operands[j].issingle
12650                     && !inst.operands[j].isquad))
12651                 matches = 0;
12652               break;
12653 
12654             case SE_D:
12655               if (!(inst.operands[j].isreg
12656                     && inst.operands[j].isvec
12657                     && !inst.operands[j].isquad
12658                     && !inst.operands[j].issingle))
12659                 matches = 0;
12660               break;
12661 
12662             case SE_R:
12663               if (!(inst.operands[j].isreg
12664                     && !inst.operands[j].isvec))
12665                 matches = 0;
12666               break;
12667 
12668             case SE_Q:
12669               if (!(inst.operands[j].isreg
12670                     && inst.operands[j].isvec
12671                     && inst.operands[j].isquad
12672                     && !inst.operands[j].issingle))
12673                 matches = 0;
12674               break;
12675 
12676             case SE_I:
12677               if (!(!inst.operands[j].isreg
12678                     && !inst.operands[j].isscalar))
12679                 matches = 0;
12680               break;
12681 
12682             case SE_S:
12683               if (!(!inst.operands[j].isreg
12684                     && inst.operands[j].isscalar))
12685                 matches = 0;
12686               break;
12687 
12688             case SE_L:
12689               break;
12690             }
12691 	  if (!matches)
12692 	    break;
12693         }
12694       if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
12695 	/* We've matched all the entries in the shape table, and we don't
12696 	   have any left over operands which have not been matched.  */
12697         break;
12698     }
12699 
12700   va_end (ap);
12701 
12702   if (shape == NS_NULL && first_shape != NS_NULL)
12703     first_error (_("invalid instruction shape"));
12704 
12705   return shape;
12706 }
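
/* For example, a three-operand instruction accepting either all-D or all-Q
   register operands would typically call

     rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);

   and get back NS_DDD or NS_QQQ depending on what was parsed, or NS_NULL
   (with an error recorded via first_error) if the operands fit neither
   shape.  */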
12707 
12708 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12709    means the Q bit should be set).  */
12710 
12711 static int
12712 neon_quad (enum neon_shape shape)
12713 {
12714   return neon_shape_class[shape] == SC_QUAD;
12715 }
12716 
12717 static void
12718 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12719                        unsigned *g_size)
12720 {
12721   /* Allow modification to be made to types which are constrained to be
12722      based on the key element, based on bits set alongside N_EQK.  */
12723   if ((typebits & N_EQK) != 0)
12724     {
12725       if ((typebits & N_HLF) != 0)
12726 	*g_size /= 2;
12727       else if ((typebits & N_DBL) != 0)
12728 	*g_size *= 2;
12729       if ((typebits & N_SGN) != 0)
12730 	*g_type = NT_signed;
12731       else if ((typebits & N_UNS) != 0)
12732         *g_type = NT_unsigned;
12733       else if ((typebits & N_INT) != 0)
12734         *g_type = NT_integer;
12735       else if ((typebits & N_FLT) != 0)
12736         *g_type = NT_float;
12737       else if ((typebits & N_SIZ) != 0)
12738         *g_type = NT_untyped;
12739     }
12740 }
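
/* For example, starting from a key type of signed 32-bit, a TYPEBITS value
   of N_EQK | N_HLF | N_UNS should yield unsigned 16-bit, and N_EQK | N_DBL
   should yield signed 64-bit.  */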
12741 
12742 /* Return a copy of KEY promoted according to the bits set in THISARG.  KEY
12743    should be the "key" operand type, i.e. the single type specified in a Neon
12744    instruction when it is the only one given.  */
12745 
12746 static struct neon_type_el
12747 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12748 {
12749   struct neon_type_el dest = *key;
12750 
12751   gas_assert ((thisarg & N_EQK) != 0);
12752 
12753   neon_modify_type_size (thisarg, &dest.type, &dest.size);
12754 
12755   return dest;
12756 }
12757 
12758 /* Convert Neon type and size into compact bitmask representation.  */
12759 
12760 static enum neon_type_mask
12761 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12762 {
12763   switch (type)
12764     {
12765     case NT_untyped:
12766       switch (size)
12767         {
12768         case 8:  return N_8;
12769         case 16: return N_16;
12770         case 32: return N_32;
12771         case 64: return N_64;
12772         default: ;
12773         }
12774       break;
12775 
12776     case NT_integer:
12777       switch (size)
12778         {
12779         case 8:  return N_I8;
12780         case 16: return N_I16;
12781         case 32: return N_I32;
12782         case 64: return N_I64;
12783         default: ;
12784         }
12785       break;
12786 
12787     case NT_float:
12788       switch (size)
12789         {
12790 	case 16: return N_F16;
12791         case 32: return N_F32;
12792         case 64: return N_F64;
12793         default: ;
12794         }
12795       break;
12796 
12797     case NT_poly:
12798       switch (size)
12799         {
12800         case 8:  return N_P8;
12801         case 16: return N_P16;
12802 	case 64: return N_P64;
12803         default: ;
12804         }
12805       break;
12806 
12807     case NT_signed:
12808       switch (size)
12809         {
12810         case 8:  return N_S8;
12811         case 16: return N_S16;
12812         case 32: return N_S32;
12813         case 64: return N_S64;
12814         default: ;
12815         }
12816       break;
12817 
12818     case NT_unsigned:
12819       switch (size)
12820         {
12821         case 8:  return N_U8;
12822         case 16: return N_U16;
12823         case 32: return N_U32;
12824         case 64: return N_U64;
12825         default: ;
12826         }
12827       break;
12828 
12829     default: ;
12830     }
12831 
12832   return N_UTYP;
12833 }
12834 
12835 /* Convert compact Neon bitmask type representation to a type and size. Only
12836    handles the case where a single bit is set in the mask.  */
12837 
12838 static int
12839 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12840                      enum neon_type_mask mask)
12841 {
12842   if ((mask & N_EQK) != 0)
12843     return FAIL;
12844 
12845   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12846     *size = 8;
12847   else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
12848     *size = 16;
12849   else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12850     *size = 32;
12851   else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
12852     *size = 64;
12853   else
12854     return FAIL;
12855 
12856   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12857     *type = NT_signed;
12858   else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12859     *type = NT_unsigned;
12860   else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12861     *type = NT_integer;
12862   else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12863     *type = NT_untyped;
12864   else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
12865     *type = NT_poly;
12866   else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
12867     *type = NT_float;
12868   else
12869     return FAIL;
12870 
12871   return SUCCESS;
12872 }
12873 
12874 /* Modify a bitmask of allowed types. This is only needed for type
12875    relaxation.  */
12876 
12877 static unsigned
12878 modify_types_allowed (unsigned allowed, unsigned mods)
12879 {
12880   unsigned size;
12881   enum neon_el_type type;
12882   unsigned destmask;
12883   int i;
12884 
12885   destmask = 0;
12886 
12887   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12888     {
12889       if (el_type_of_type_chk (&type, &size,
12890                                (enum neon_type_mask) (allowed & i)) == SUCCESS)
12891         {
12892           neon_modify_type_size (mods, &type, &size);
12893           destmask |= type_chk_of_el_type (type, size);
12894         }
12895     }
12896 
12897   return destmask;
12898 }
12899 
12900 /* Check type and return type classification.
12901    The manual states (paraphrase): If one datatype is given, it indicates the
12902    type given in:
12903     - the second operand, if there is one
12904     - the operand, if there is no second operand
12905     - the result, if there are no operands.
12906    This isn't quite good enough though, so we use a concept of a "key" datatype
12907    which is set on a per-instruction basis, which is the one which matters when
12908    only one data type is written.
12909    Note: this function has side-effects (e.g. filling in missing operands). All
12910    Neon instructions should call it before performing bit encoding.  */
12911 
12912 static struct neon_type_el
12913 neon_check_type (unsigned els, enum neon_shape ns, ...)
12914 {
12915   va_list ap;
12916   unsigned i, pass, key_el = 0;
12917   unsigned types[NEON_MAX_TYPE_ELS];
12918   enum neon_el_type k_type = NT_invtype;
12919   unsigned k_size = -1u;
12920   struct neon_type_el badtype = {NT_invtype, -1};
12921   unsigned key_allowed = 0;
12922 
12923   /* The register operand that may be omitted in Neon instructions is always
12924      operand 1; if it was omitted, fill it in here by duplicating operand 0.  */
12925   if (els > 1 && !inst.operands[1].present)
12926     inst.operands[1] = inst.operands[0];
12927 
12928   /* Suck up all the varargs.  */
12929   va_start (ap, ns);
12930   for (i = 0; i < els; i++)
12931     {
12932       unsigned thisarg = va_arg (ap, unsigned);
12933       if (thisarg == N_IGNORE_TYPE)
12934         {
12935           va_end (ap);
12936           return badtype;
12937         }
12938       types[i] = thisarg;
12939       if ((thisarg & N_KEY) != 0)
12940         key_el = i;
12941     }
12942   va_end (ap);
12943 
12944   if (inst.vectype.elems > 0)
12945     for (i = 0; i < els; i++)
12946       if (inst.operands[i].vectype.type != NT_invtype)
12947         {
12948           first_error (_("types specified in both the mnemonic and operands"));
12949           return badtype;
12950         }
12951 
12952   /* Duplicate inst.vectype elements here as necessary.
12953      FIXME: No idea if this is exactly the same as the ARM assembler,
12954      particularly when an insn takes one register and one non-register
12955      operand. */
12956   if (inst.vectype.elems == 1 && els > 1)
12957     {
12958       unsigned j;
12959       inst.vectype.elems = els;
12960       inst.vectype.el[key_el] = inst.vectype.el[0];
12961       for (j = 0; j < els; j++)
12962         if (j != key_el)
12963           inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12964                                                   types[j]);
12965     }
12966   else if (inst.vectype.elems == 0 && els > 0)
12967     {
12968       unsigned j;
12969       /* No types were given after the mnemonic, so look for types specified
12970          after each operand. We allow some flexibility here; as long as the
12971          "key" operand has a type, we can infer the others.  */
12972       for (j = 0; j < els; j++)
12973         if (inst.operands[j].vectype.type != NT_invtype)
12974           inst.vectype.el[j] = inst.operands[j].vectype;
12975 
12976       if (inst.operands[key_el].vectype.type != NT_invtype)
12977         {
12978           for (j = 0; j < els; j++)
12979             if (inst.operands[j].vectype.type == NT_invtype)
12980               inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12981                                                       types[j]);
12982         }
12983       else
12984         {
12985           first_error (_("operand types can't be inferred"));
12986           return badtype;
12987         }
12988     }
12989   else if (inst.vectype.elems != els)
12990     {
12991       first_error (_("type specifier has the wrong number of parts"));
12992       return badtype;
12993     }
12994 
12995   for (pass = 0; pass < 2; pass++)
12996     {
12997       for (i = 0; i < els; i++)
12998         {
12999           unsigned thisarg = types[i];
13000           unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13001             ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13002           enum neon_el_type g_type = inst.vectype.el[i].type;
13003           unsigned g_size = inst.vectype.el[i].size;
13004 
13005           /* Decay more-specific signed & unsigned types to sign-insensitive
13006 	     integer types if sign-specific variants are unavailable.  */
13007           if ((g_type == NT_signed || g_type == NT_unsigned)
13008 	      && (types_allowed & N_SU_ALL) == 0)
13009 	    g_type = NT_integer;
13010 
13011           /* If only untyped args are allowed, decay any more specific types to
13012 	     them. Some instructions only care about signs for some element
13013 	     sizes, so handle that properly.  */
13014           if (((types_allowed & N_UNT) == 0)
13015 	      && ((g_size == 8 && (types_allowed & N_8) != 0)
13016 		  || (g_size == 16 && (types_allowed & N_16) != 0)
13017 		  || (g_size == 32 && (types_allowed & N_32) != 0)
13018 		  || (g_size == 64 && (types_allowed & N_64) != 0)))
13019 	    g_type = NT_untyped;
13020 
13021           if (pass == 0)
13022             {
13023               if ((thisarg & N_KEY) != 0)
13024                 {
13025                   k_type = g_type;
13026                   k_size = g_size;
13027                   key_allowed = thisarg & ~N_KEY;
13028                 }
13029             }
13030           else
13031             {
13032               if ((thisarg & N_VFP) != 0)
13033                 {
13034                   enum neon_shape_el regshape;
13035                   unsigned regwidth, match;
13036 
13037 		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
13038 		  if (ns == NS_NULL)
13039 		    {
13040 		      first_error (_("invalid instruction shape"));
13041 		      return badtype;
13042 		    }
13043                   regshape = neon_shape_tab[ns].el[i];
13044                   regwidth = neon_shape_el_size[regshape];
13045 
13046                   /* In VFP mode, operands must match register widths. If we
13047                      have a key operand, use its width, else use the width of
13048                      the current operand.  */
13049                   if (k_size != -1u)
13050                     match = k_size;
13051                   else
13052                     match = g_size;
13053 
13054                   if (regwidth != match)
13055                     {
13056                       first_error (_("operand size must match register width"));
13057                       return badtype;
13058                     }
13059                 }
13060 
13061               if ((thisarg & N_EQK) == 0)
13062                 {
13063                   unsigned given_type = type_chk_of_el_type (g_type, g_size);
13064 
13065                   if ((given_type & types_allowed) == 0)
13066                     {
13067 	              first_error (_("bad type in Neon instruction"));
13068 	              return badtype;
13069                     }
13070                 }
13071               else
13072                 {
13073                   enum neon_el_type mod_k_type = k_type;
13074                   unsigned mod_k_size = k_size;
13075                   neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
13076                   if (g_type != mod_k_type || g_size != mod_k_size)
13077                     {
13078                       first_error (_("inconsistent types in Neon instruction"));
13079                       return badtype;
13080                     }
13081                 }
13082             }
13083         }
13084     }
13085 
13086   return inst.vectype.el[key_el];
13087 }
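
/* For example, given a constraint list of (N_EQK, N_EQK, N_SU_32 | N_KEY),
   writing a single ".s16" type on the mnemonic makes signed 16-bit the key
   type; the N_EQK operands are then filled in from that key and the element
   returned describes the key (NT_signed, size 16).  If the checks fail, an
   error is recorded via first_error and a type of NT_invtype is returned.  */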
13088 
13089 /* Neon-style VFP instruction forwarding.  */
13090 
13091 /* Thumb VFP instructions have 0xE in the condition field.  */
13092 
13093 static void
13094 do_vfp_cond_or_thumb (void)
13095 {
13096   inst.is_neon = 1;
13097 
13098   if (thumb_mode)
13099     inst.instruction |= 0xe0000000;
13100   else
13101     inst.instruction |= inst.cond << 28;
13102 }
13103 
13104 /* Look up and encode a simple mnemonic, for use as a helper function for the
13105    Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
13106    etc.  It is assumed that operand parsing has already been done, and that the
13107    operands are in the form expected by the given opcode (this isn't necessarily
13108    the same as the form in which they were parsed, hence some massaging must
13109    take place before this function is called).
13110    Checks current arch version against that in the looked-up opcode.  */
13111 
13112 static void
13113 do_vfp_nsyn_opcode (const char *opname)
13114 {
13115   const struct asm_opcode *opcode;
13116 
13117   opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13118 
13119   if (!opcode)
13120     abort ();
13121 
13122   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13123                 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13124               _(BAD_FPU));
13125 
13126   inst.is_neon = 1;
13127 
13128   if (thumb_mode)
13129     {
13130       inst.instruction = opcode->tvalue;
13131       opcode->tencode ();
13132     }
13133   else
13134     {
13135       inst.instruction = (inst.cond << 28) | opcode->avalue;
13136       opcode->aencode ();
13137     }
13138 }
13139 
13140 static void
13141 do_vfp_nsyn_add_sub (enum neon_shape rs)
13142 {
13143   int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13144 
13145   if (rs == NS_FFF)
13146     {
13147       if (is_add)
13148         do_vfp_nsyn_opcode ("fadds");
13149       else
13150         do_vfp_nsyn_opcode ("fsubs");
13151     }
13152   else
13153     {
13154       if (is_add)
13155         do_vfp_nsyn_opcode ("faddd");
13156       else
13157         do_vfp_nsyn_opcode ("fsubd");
13158     }
13159 }
13160 
13161 /* Check operand types to see if this is a VFP instruction, and if so call
13162    PFN ().  */
13163 
13164 static int
13165 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13166 {
13167   enum neon_shape rs;
13168   struct neon_type_el et;
13169 
13170   switch (args)
13171     {
13172     case 2:
13173       rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13174       et = neon_check_type (2, rs,
13175         N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13176       break;
13177 
13178     case 3:
13179       rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13180       et = neon_check_type (3, rs,
13181         N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13182       break;
13183 
13184     default:
13185       abort ();
13186     }
13187 
13188   if (et.type != NT_invtype)
13189     {
13190       pfn (rs);
13191       return SUCCESS;
13192     }
13193 
13194   inst.error = NULL;
13195   return FAIL;
13196 }
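
/* For example, the VADD/VSUB handler below first calls
   try_vfp_nsyn (3, do_vfp_nsyn_add_sub) and only falls through to the Neon
   encoding when the operand types do not describe one of the VFP (NS_FFF or
   NS_DDD) forms.  */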
13197 
13198 static void
13199 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13200 {
13201   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13202 
13203   if (rs == NS_FFF)
13204     {
13205       if (is_mla)
13206         do_vfp_nsyn_opcode ("fmacs");
13207       else
13208         do_vfp_nsyn_opcode ("fnmacs");
13209     }
13210   else
13211     {
13212       if (is_mla)
13213         do_vfp_nsyn_opcode ("fmacd");
13214       else
13215         do_vfp_nsyn_opcode ("fnmacd");
13216     }
13217 }
13218 
13219 static void
13220 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13221 {
13222   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13223 
13224   if (rs == NS_FFF)
13225     {
13226       if (is_fma)
13227         do_vfp_nsyn_opcode ("ffmas");
13228       else
13229         do_vfp_nsyn_opcode ("ffnmas");
13230     }
13231   else
13232     {
13233       if (is_fma)
13234         do_vfp_nsyn_opcode ("ffmad");
13235       else
13236         do_vfp_nsyn_opcode ("ffnmad");
13237     }
13238 }
13239 
13240 static void
13241 do_vfp_nsyn_mul (enum neon_shape rs)
13242 {
13243   if (rs == NS_FFF)
13244     do_vfp_nsyn_opcode ("fmuls");
13245   else
13246     do_vfp_nsyn_opcode ("fmuld");
13247 }
13248 
13249 static void
13250 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13251 {
13252   int is_neg = (inst.instruction & 0x80) != 0;
13253   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13254 
13255   if (rs == NS_FF)
13256     {
13257       if (is_neg)
13258         do_vfp_nsyn_opcode ("fnegs");
13259       else
13260         do_vfp_nsyn_opcode ("fabss");
13261     }
13262   else
13263     {
13264       if (is_neg)
13265         do_vfp_nsyn_opcode ("fnegd");
13266       else
13267         do_vfp_nsyn_opcode ("fabsd");
13268     }
13269 }
13270 
13271 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13272    insns belong to Neon, and are handled elsewhere.  */
13273 
13274 static void
13275 do_vfp_nsyn_ldm_stm (int is_dbmode)
13276 {
13277   int is_ldm = (inst.instruction & (1 << 20)) != 0;
13278   if (is_ldm)
13279     {
13280       if (is_dbmode)
13281         do_vfp_nsyn_opcode ("fldmdbs");
13282       else
13283         do_vfp_nsyn_opcode ("fldmias");
13284     }
13285   else
13286     {
13287       if (is_dbmode)
13288         do_vfp_nsyn_opcode ("fstmdbs");
13289       else
13290         do_vfp_nsyn_opcode ("fstmias");
13291     }
13292 }
13293 
13294 static void
13295 do_vfp_nsyn_sqrt (void)
13296 {
13297   enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13298   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13299 
13300   if (rs == NS_FF)
13301     do_vfp_nsyn_opcode ("fsqrts");
13302   else
13303     do_vfp_nsyn_opcode ("fsqrtd");
13304 }
13305 
13306 static void
13307 do_vfp_nsyn_div (void)
13308 {
13309   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13310   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13311     N_F32 | N_F64 | N_KEY | N_VFP);
13312 
13313   if (rs == NS_FFF)
13314     do_vfp_nsyn_opcode ("fdivs");
13315   else
13316     do_vfp_nsyn_opcode ("fdivd");
13317 }
13318 
13319 static void
13320 do_vfp_nsyn_nmul (void)
13321 {
13322   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13323   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13324     N_F32 | N_F64 | N_KEY | N_VFP);
13325 
13326   if (rs == NS_FFF)
13327     {
13328       NEON_ENCODE (SINGLE, inst);
13329       do_vfp_sp_dyadic ();
13330     }
13331   else
13332     {
13333       NEON_ENCODE (DOUBLE, inst);
13334       do_vfp_dp_rd_rn_rm ();
13335     }
13336   do_vfp_cond_or_thumb ();
13337 }
13338 
13339 static void
13340 do_vfp_nsyn_cmp (void)
13341 {
13342   if (inst.operands[1].isreg)
13343     {
13344       enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13345       neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13346 
13347       if (rs == NS_FF)
13348         {
13349           NEON_ENCODE (SINGLE, inst);
13350           do_vfp_sp_monadic ();
13351         }
13352       else
13353         {
13354           NEON_ENCODE (DOUBLE, inst);
13355           do_vfp_dp_rd_rm ();
13356         }
13357     }
13358   else
13359     {
13360       enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13361       neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13362 
13363       switch (inst.instruction & 0x0fffffff)
13364         {
13365         case N_MNEM_vcmp:
13366           inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13367           break;
13368         case N_MNEM_vcmpe:
13369           inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13370           break;
13371         default:
13372           abort ();
13373         }
13374 
13375       if (rs == NS_FI)
13376         {
13377           NEON_ENCODE (SINGLE, inst);
13378           do_vfp_sp_compare_z ();
13379         }
13380       else
13381         {
13382           NEON_ENCODE (DOUBLE, inst);
13383           do_vfp_dp_rd ();
13384         }
13385     }
13386   do_vfp_cond_or_thumb ();
13387 }
13388 
13389 static void
13390 nsyn_insert_sp (void)
13391 {
13392   inst.operands[1] = inst.operands[0];
13393   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13394   inst.operands[0].reg = REG_SP;
13395   inst.operands[0].isreg = 1;
13396   inst.operands[0].writeback = 1;
13397   inst.operands[0].present = 1;
13398 }
13399 
13400 static void
13401 do_vfp_nsyn_push (void)
13402 {
13403   nsyn_insert_sp ();
13404   if (inst.operands[1].issingle)
13405     do_vfp_nsyn_opcode ("fstmdbs");
13406   else
13407     do_vfp_nsyn_opcode ("fstmdbd");
13408 }
13409 
13410 static void
13411 do_vfp_nsyn_pop (void)
13412 {
13413   nsyn_insert_sp ();
13414   if (inst.operands[1].issingle)
13415     do_vfp_nsyn_opcode ("fldmias");
13416   else
13417     do_vfp_nsyn_opcode ("fldmiad");
13418 }
13419 
13420 /* Fix up Neon data-processing instructions, ORing in the correct bits for ARM
13421    or Thumb mode and, in Thumb mode, moving the encoded bit 24 to bit 28.  */
13422 
13423 static void
13424 neon_dp_fixup (struct arm_it* insn)
13425 {
13426   unsigned int i = insn->instruction;
13427   insn->is_neon = 1;
13428 
13429   if (thumb_mode)
13430     {
13431       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
13432       if (i & (1 << 24))
13433         i |= 1 << 28;
13434 
13435       i &= ~(1 << 24);
13436 
13437       i |= 0xef000000;
13438     }
13439   else
13440     i |= 0xf2000000;
13441 
13442   insn->instruction = i;
13443 }
13444 
13445 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13446    (0, 1, 2, 3).  */
13447 
13448 static unsigned
13449 neon_logbits (unsigned x)
13450 {
13451   return ffs (x) - 4;
13452 }
13453 
13454 #define LOW4(R) ((R) & 0xf)
13455 #define HI1(R) (((R) >> 4) & 1)
13456 
13457 /* Encode insns with bit pattern:
13458 
13459   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
13460   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
13461 
13462   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13463   different meaning for some instruction.  */
13464 
13465 static void
13466 neon_three_same (int isquad, int ubit, int size)
13467 {
13468   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13469   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13470   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13471   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13472   inst.instruction |= LOW4 (inst.operands[2].reg);
13473   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13474   inst.instruction |= (isquad != 0) << 6;
13475   inst.instruction |= (ubit != 0) << 24;
13476   if (size != -1)
13477     inst.instruction |= neon_logbits (size) << 20;
13478 
13479   neon_dp_fixup (&inst);
13480 }
13481 
13482 /* Encode instructions of the form:
13483 
13484   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
13485   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
13486 
13487   Don't write size if SIZE == -1.  */
13488 
13489 static void
13490 neon_two_same (int qbit, int ubit, int size)
13491 {
13492   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13493   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13494   inst.instruction |= LOW4 (inst.operands[1].reg);
13495   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13496   inst.instruction |= (qbit != 0) << 6;
13497   inst.instruction |= (ubit != 0) << 24;
13498 
13499   if (size != -1)
13500     inst.instruction |= neon_logbits (size) << 18;
13501 
13502   neon_dp_fixup (&inst);
13503 }
13504 
13505 /* Neon instruction encoders, in approximate order of appearance.  */
13506 
13507 static void
13508 do_neon_dyadic_i_su (void)
13509 {
13510   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13511   struct neon_type_el et = neon_check_type (3, rs,
13512     N_EQK, N_EQK, N_SU_32 | N_KEY);
13513   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13514 }
13515 
13516 static void
13517 do_neon_dyadic_i64_su (void)
13518 {
13519   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13520   struct neon_type_el et = neon_check_type (3, rs,
13521     N_EQK, N_EQK, N_SU_ALL | N_KEY);
13522   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13523 }
13524 
13525 static void
13526 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13527                 unsigned immbits)
13528 {
13529   unsigned size = et.size >> 3;
13530   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13531   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13532   inst.instruction |= LOW4 (inst.operands[1].reg);
13533   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13534   inst.instruction |= (isquad != 0) << 6;
13535   inst.instruction |= immbits << 16;
13536   inst.instruction |= (size >> 3) << 7;
13537   inst.instruction |= (size & 0x7) << 19;
13538   if (write_ubit)
13539     inst.instruction |= (uval != 0) << 24;
13540 
13541   neon_dp_fixup (&inst);
13542 }
13543 
13544 static void
13545 do_neon_shl_imm (void)
13546 {
13547   if (!inst.operands[2].isreg)
13548     {
13549       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13550       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13551       NEON_ENCODE (IMMED, inst);
13552       neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13553     }
13554   else
13555     {
13556       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13557       struct neon_type_el et = neon_check_type (3, rs,
13558         N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13559       unsigned int tmp;
13560 
13561       /* VSHL/VQSHL 3-register variants have syntax such as:
13562            vshl.xx Dd, Dm, Dn
13563          whereas other 3-register operations encoded by neon_three_same have
13564          syntax like:
13565            vadd.xx Dd, Dn, Dm
13566          (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13567          here.  */
13568       tmp = inst.operands[2].reg;
13569       inst.operands[2].reg = inst.operands[1].reg;
13570       inst.operands[1].reg = tmp;
13571       NEON_ENCODE (INTEGER, inst);
13572       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13573     }
13574 }
13575 
13576 static void
13577 do_neon_qshl_imm (void)
13578 {
13579   if (!inst.operands[2].isreg)
13580     {
13581       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13582       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13583 
13584       NEON_ENCODE (IMMED, inst);
13585       neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13586                       inst.operands[2].imm);
13587     }
13588   else
13589     {
13590       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13591       struct neon_type_el et = neon_check_type (3, rs,
13592         N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13593       unsigned int tmp;
13594 
13595       /* See note in do_neon_shl_imm.  */
13596       tmp = inst.operands[2].reg;
13597       inst.operands[2].reg = inst.operands[1].reg;
13598       inst.operands[1].reg = tmp;
13599       NEON_ENCODE (INTEGER, inst);
13600       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13601     }
13602 }
13603 
13604 static void
13605 do_neon_rshl (void)
13606 {
13607   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13608   struct neon_type_el et = neon_check_type (3, rs,
13609     N_EQK, N_EQK, N_SU_ALL | N_KEY);
13610   unsigned int tmp;
13611 
13612   tmp = inst.operands[2].reg;
13613   inst.operands[2].reg = inst.operands[1].reg;
13614   inst.operands[1].reg = tmp;
13615   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13616 }
13617 
13618 static int
13619 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13620 {
13621   /* Handle .I8 pseudo-instructions.  */
13622   if (size == 8)
13623     {
13624       /* Unfortunately, this will make everything apart from zero out-of-range.
13625          FIXME: is this the intended semantics?  There doesn't seem to be much
13626          point in accepting .I8 if so.  */
13627       immediate |= immediate << 8;
13628       size = 16;
13629     }
13630 
13631   if (size >= 32)
13632     {
13633       if (immediate == (immediate & 0x000000ff))
13634 	{
13635 	  *immbits = immediate;
13636 	  return 0x1;
13637 	}
13638       else if (immediate == (immediate & 0x0000ff00))
13639 	{
13640 	  *immbits = immediate >> 8;
13641 	  return 0x3;
13642 	}
13643       else if (immediate == (immediate & 0x00ff0000))
13644 	{
13645 	  *immbits = immediate >> 16;
13646 	  return 0x5;
13647 	}
13648       else if (immediate == (immediate & 0xff000000))
13649 	{
13650 	  *immbits = immediate >> 24;
13651 	  return 0x7;
13652 	}
13653       if ((immediate & 0xffff) != (immediate >> 16))
13654 	goto bad_immediate;
13655       immediate &= 0xffff;
13656     }
13657 
13658   if (immediate == (immediate & 0x000000ff))
13659     {
13660       *immbits = immediate;
13661       return 0x9;
13662     }
13663   else if (immediate == (immediate & 0x0000ff00))
13664     {
13665       *immbits = immediate >> 8;
13666       return 0xb;
13667     }
13668 
13669   bad_immediate:
13670   first_error (_("immediate value out of range"));
13671   return FAIL;
13672 }
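
/* For example, with SIZE of 32 an immediate of 0x00004200 fits the "byte 1
   only" pattern, so *IMMBITS is set to 0x42 and CMODE 0x3 is returned; a
   plain 0x42 would give CMODE 0x1, and a repeating value such as 0x00420042
   is reduced to its 16-bit pattern and gives CMODE 0x9.  */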
13673 
13674 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13675    A, B, C, D.  */
13676 
13677 static int
13678 neon_bits_same_in_bytes (unsigned imm)
13679 {
13680   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13681          && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13682          && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13683          && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13684 }
13685 
13686 /* For immediate of above form, return 0bABCD.  */
13687 
13688 static unsigned
13689 neon_squash_bits (unsigned imm)
13690 {
13691   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13692          | ((imm & 0x01000000) >> 21);
13693 }
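
/* For example, 0xff00ff00 has each byte equal to either 0x00 or 0xff, so
   neon_bits_same_in_bytes accepts it and neon_squash_bits reduces it to
   0b1010.  */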
13694 
13695 /* Compress quarter-float representation to 0b...000 abcdefgh.  */
13696 
13697 static unsigned
13698 neon_qfloat_bits (unsigned imm)
13699 {
13700   return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13701 }
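
/* For example, the single-precision encoding of 1.0 (0x3f800000) squashes
   down to 0x70.  */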
13702 
13703 /* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
13704    the instruction.  *OP is passed as the initial value of the op field, and
13705    may be set to a different value depending on the constant (e.g.
13706    "MOV I64, 0bAAAAAAAABBBB...", which uses OP = 1 despite being MOV not
13707    MVN).  If the immediate looks like a repeated pattern then also
13708    try smaller element sizes.  */
13709 
13710 static int
13711 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13712 			 unsigned *immbits, int *op, int size,
13713 			 enum neon_el_type type)
13714 {
13715   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13716      float.  */
13717   if (type == NT_float && !float_p)
13718     return FAIL;
13719 
13720   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13721     {
13722       if (size != 32 || *op == 1)
13723         return FAIL;
13724       *immbits = neon_qfloat_bits (immlo);
13725       return 0xf;
13726     }
13727 
13728   if (size == 64)
13729     {
13730       if (neon_bits_same_in_bytes (immhi)
13731 	  && neon_bits_same_in_bytes (immlo))
13732 	{
13733 	  if (*op == 1)
13734 	    return FAIL;
13735 	  *immbits = (neon_squash_bits (immhi) << 4)
13736 		     | neon_squash_bits (immlo);
13737 	  *op = 1;
13738 	  return 0xe;
13739 	}
13740 
13741       if (immhi != immlo)
13742 	return FAIL;
13743     }
13744 
13745   if (size >= 32)
13746     {
13747       if (immlo == (immlo & 0x000000ff))
13748 	{
13749 	  *immbits = immlo;
13750 	  return 0x0;
13751 	}
13752       else if (immlo == (immlo & 0x0000ff00))
13753 	{
13754 	  *immbits = immlo >> 8;
13755 	  return 0x2;
13756 	}
13757       else if (immlo == (immlo & 0x00ff0000))
13758 	{
13759 	  *immbits = immlo >> 16;
13760 	  return 0x4;
13761 	}
13762       else if (immlo == (immlo & 0xff000000))
13763 	{
13764 	  *immbits = immlo >> 24;
13765 	  return 0x6;
13766 	}
13767       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13768 	{
13769 	  *immbits = (immlo >> 8) & 0xff;
13770 	  return 0xc;
13771 	}
13772       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13773 	{
13774 	  *immbits = (immlo >> 16) & 0xff;
13775 	  return 0xd;
13776 	}
13777 
13778       if ((immlo & 0xffff) != (immlo >> 16))
13779 	return FAIL;
13780       immlo &= 0xffff;
13781     }
13782 
13783   if (size >= 16)
13784     {
13785       if (immlo == (immlo & 0x000000ff))
13786 	{
13787 	  *immbits = immlo;
13788 	  return 0x8;
13789 	}
13790       else if (immlo == (immlo & 0x0000ff00))
13791 	{
13792 	  *immbits = immlo >> 8;
13793 	  return 0xa;
13794 	}
13795 
13796       if ((immlo & 0xff) != (immlo >> 8))
13797 	return FAIL;
13798       immlo &= 0xff;
13799     }
13800 
13801   if (immlo == (immlo & 0x000000ff))
13802     {
13803       /* Don't allow MVN with 8-bit immediate.  */
13804       if (*op == 1)
13805 	return FAIL;
13806       *immbits = immlo;
13807       return 0xe;
13808     }
13809 
13810   return FAIL;
13811 }
13812 
13813 /* Write immediate bits [7:0] to the following locations:
13814 
13815   |28/24|23     19|18 16|15                    4|3     0|
13816   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13817 
13818   This function is used by VMOV/VMVN/VORR/VBIC.  */
13819 
13820 static void
13821 neon_write_immbits (unsigned immbits)
13822 {
13823   inst.instruction |= immbits & 0xf;
13824   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13825   inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13826 }
13827 
13828 /* Invert low-order SIZE bits of XHI:XLO.  */
13829 
13830 static void
13831 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13832 {
13833   unsigned immlo = xlo ? *xlo : 0;
13834   unsigned immhi = xhi ? *xhi : 0;
13835 
13836   switch (size)
13837     {
13838     case 8:
13839       immlo = (~immlo) & 0xff;
13840       break;
13841 
13842     case 16:
13843       immlo = (~immlo) & 0xffff;
13844       break;
13845 
13846     case 64:
13847       immhi = (~immhi) & 0xffffffff;
13848       /* fall through.  */
13849 
13850     case 32:
13851       immlo = (~immlo) & 0xffffffff;
13852       break;
13853 
13854     default:
13855       abort ();
13856     }
13857 
13858   if (xlo)
13859     *xlo = immlo;
13860 
13861   if (xhi)
13862     *xhi = immhi;
13863 }
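
/* For example, neon_invert_size (&lo, NULL, 16) with lo == 0x00ff leaves
   lo == 0xff00; this is how the VAND and VORN immediate pseudo-instructions
   below are converted into VBIC and VORR forms.  */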
13864 
13865 static void
13866 do_neon_logic (void)
13867 {
13868   if (inst.operands[2].present && inst.operands[2].isreg)
13869     {
13870       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13871       neon_check_type (3, rs, N_IGNORE_TYPE);
13872       /* U bit and size field were set as part of the bitmask.  */
13873       NEON_ENCODE (INTEGER, inst);
13874       neon_three_same (neon_quad (rs), 0, -1);
13875     }
13876   else
13877     {
13878       const int three_ops_form = (inst.operands[2].present
13879 				  && !inst.operands[2].isreg);
13880       const int immoperand = (three_ops_form ? 2 : 1);
13881       enum neon_shape rs = (three_ops_form
13882 			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13883 			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13884       struct neon_type_el et = neon_check_type (2, rs,
13885         N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13886       enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13887       unsigned immbits;
13888       int cmode;
13889 
13890       if (et.type == NT_invtype)
13891         return;
13892 
13893       if (three_ops_form)
13894 	constraint (inst.operands[0].reg != inst.operands[1].reg,
13895 		    _("first and second operands shall be the same register"));
13896 
13897       NEON_ENCODE (IMMED, inst);
13898 
13899       immbits = inst.operands[immoperand].imm;
13900       if (et.size == 64)
13901 	{
13902 	  /* .i64 is a pseudo-op, so the immediate must be a repeating
13903 	     pattern.  */
13904 	  if (immbits != (inst.operands[immoperand].regisimm ?
13905 			  inst.operands[immoperand].reg : 0))
13906 	    {
13907 	      /* Set immbits to an invalid constant.  */
13908 	      immbits = 0xdeadbeef;
13909 	    }
13910 	}
13911 
13912       switch (opcode)
13913         {
13914         case N_MNEM_vbic:
13915           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13916           break;
13917 
13918         case N_MNEM_vorr:
13919           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13920           break;
13921 
13922         case N_MNEM_vand:
13923           /* Pseudo-instruction for VBIC.  */
13924           neon_invert_size (&immbits, 0, et.size);
13925           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13926           break;
13927 
13928         case N_MNEM_vorn:
13929           /* Pseudo-instruction for VORR.  */
13930           neon_invert_size (&immbits, 0, et.size);
13931           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13932           break;
13933 
13934         default:
13935           abort ();
13936         }
13937 
13938       if (cmode == FAIL)
13939         return;
13940 
13941       inst.instruction |= neon_quad (rs) << 6;
13942       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13943       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13944       inst.instruction |= cmode << 8;
13945       neon_write_immbits (immbits);
13946 
13947       neon_dp_fixup (&inst);
13948     }
13949 }
13950 
13951 static void
13952 do_neon_bitfield (void)
13953 {
13954   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13955   neon_check_type (3, rs, N_IGNORE_TYPE);
13956   neon_three_same (neon_quad (rs), 0, -1);
13957 }
13958 
13959 static void
13960 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13961                   unsigned destbits)
13962 {
13963   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13964   struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13965                                             types | N_KEY);
13966   if (et.type == NT_float)
13967     {
13968       NEON_ENCODE (FLOAT, inst);
13969       neon_three_same (neon_quad (rs), 0, -1);
13970     }
13971   else
13972     {
13973       NEON_ENCODE (INTEGER, inst);
13974       neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13975     }
13976 }
13977 
13978 static void
13979 do_neon_dyadic_if_su (void)
13980 {
13981   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13982 }
13983 
13984 static void
13985 do_neon_dyadic_if_su_d (void)
13986 {
13987   /* This version only allows D registers, but that constraint is enforced
13988      during operand parsing, so we don't need to do anything extra here.  */
13989   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13990 }
13991 
13992 static void
13993 do_neon_dyadic_if_i_d (void)
13994 {
13995   /* The "untyped" case can't happen. Do this to stop the "U" bit being
13996      affected if we specify unsigned args.  */
13997   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13998 }
13999 
14000 enum vfp_or_neon_is_neon_bits
14001 {
14002   NEON_CHECK_CC = 1,
14003   NEON_CHECK_ARCH = 2,
14004   NEON_CHECK_ARCH8 = 4
14005 };
14006 
14007 /* Call this function for an instruction which could have belonged to either
14008    the VFP or the Neon instruction set, but turned out to be a Neon
14009    instruction (due to the operand types involved, etc.).  We have to check
14010    and/or fix up a couple of things:
14011 
14012      - Make sure the user hasn't attempted to make a Neon instruction
14013        conditional.
14014      - Alter the value in the condition code field if necessary.
14015      - Make sure that the arch supports Neon instructions.
14016 
14017    Which of these operations take place depends on bits from enum
14018    vfp_or_neon_is_neon_bits.
14019 
14020    WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14021    current instruction's condition is COND_ALWAYS, the condition field is
14022    changed to inst.uncond_value. This is necessary because instructions shared
14023    between VFP and Neon may be conditional for the VFP variants only, and the
14024    unconditional Neon version must have, e.g., 0xF in the condition field.  */
14025 
14026 static int
14027 vfp_or_neon_is_neon (unsigned check)
14028 {
14029   /* Conditions are always legal in Thumb mode (IT blocks).  */
14030   if (!thumb_mode && (check & NEON_CHECK_CC))
14031     {
14032       if (inst.cond != COND_ALWAYS)
14033         {
14034           first_error (_(BAD_COND));
14035           return FAIL;
14036         }
14037       if (inst.uncond_value != -1)
14038         inst.instruction |= inst.uncond_value << 28;
14039     }
14040 
14041   if ((check & NEON_CHECK_ARCH)
14042       && !mark_feature_used (&fpu_neon_ext_v1))
14043     {
14044       first_error (_(BAD_FPU));
14045       return FAIL;
14046     }
14047 
14048   if ((check & NEON_CHECK_ARCH8)
14049       && !mark_feature_used (&fpu_neon_ext_armv8))
14050     {
14051       first_error (_(BAD_FPU));
14052       return FAIL;
14053     }
14054 
14055   return SUCCESS;
14056 }
14057 
14058 static void
14059 do_neon_addsub_if_i (void)
14060 {
14061   if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14062     return;
14063 
14064   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14065     return;
14066 
14067   /* The "untyped" case can't happen. Do this to stop the "U" bit being
14068      affected if we specify unsigned args.  */
14069   neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14070 }
14071 
14072 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14073    result to be:
14074      V<op> A,B     (A is operand 0, B is operand 2)
14075    to mean:
14076      V<op> A,B,A
14077    not:
14078      V<op> A,B,B
14079    so handle that case specially.  */
14080 
14081 static void
14082 neon_exchange_operands (void)
14083 {
14084   void *scratch = alloca (sizeof (inst.operands[0]));
14085   if (inst.operands[1].present)
14086     {
14087       /* Swap operands[1] and operands[2].  */
14088       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14089       inst.operands[1] = inst.operands[2];
14090       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14091     }
14092   else
14093     {
14094       inst.operands[1] = inst.operands[2];
14095       inst.operands[2] = inst.operands[0];
14096     }
14097 }
14098 
14099 static void
14100 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14101 {
14102   if (inst.operands[2].isreg)
14103     {
14104       if (invert)
14105         neon_exchange_operands ();
14106       neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14107     }
14108   else
14109     {
14110       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14111       struct neon_type_el et = neon_check_type (2, rs,
14112         N_EQK | N_SIZ, immtypes | N_KEY);
14113 
14114       NEON_ENCODE (IMMED, inst);
14115       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14116       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14117       inst.instruction |= LOW4 (inst.operands[1].reg);
14118       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14119       inst.instruction |= neon_quad (rs) << 6;
14120       inst.instruction |= (et.type == NT_float) << 10;
14121       inst.instruction |= neon_logbits (et.size) << 18;
14122 
14123       neon_dp_fixup (&inst);
14124     }
14125 }
14126 
14127 static void
14128 do_neon_cmp (void)
14129 {
14130   neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14131 }
14132 
14133 static void
14134 do_neon_cmp_inv (void)
14135 {
14136   neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14137 }
14138 
14139 static void
14140 do_neon_ceq (void)
14141 {
14142   neon_compare (N_IF_32, N_IF_32, FALSE);
14143 }
14144 
14145 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14146    scalars, which are encoded in 5 bits, M : Rm.
14147    For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14148    M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14149    index in M.  */
14150 
14151 static unsigned
14152 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14153 {
14154   unsigned regno = NEON_SCALAR_REG (scalar);
14155   unsigned elno = NEON_SCALAR_INDEX (scalar);
14156 
14157   switch (elsize)
14158     {
14159     case 16:
14160       if (regno > 7 || elno > 3)
14161         goto bad_scalar;
14162       return regno | (elno << 3);
14163 
14164     case 32:
14165       if (regno > 15 || elno > 1)
14166         goto bad_scalar;
14167       return regno | (elno << 4);
14168 
14169     default:
14170     bad_scalar:
14171       first_error (_("scalar out of range for multiply instruction"));
14172     }
14173 
14174   return 0;
14175 }
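
/* For example, the 32-bit scalar D3[1] (register 3, element 1) encodes as
   0x13: Rm[3:0] == 3 and the index bit M == 1.  */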
14176 
14177 /* Encode multiply / multiply-accumulate scalar instructions.  */
14178 
14179 static void
14180 neon_mul_mac (struct neon_type_el et, int ubit)
14181 {
14182   unsigned scalar;
14183 
14184   /* Give a more helpful error message if we have an invalid type.  */
14185   if (et.type == NT_invtype)
14186     return;
14187 
14188   scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14189   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14190   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14191   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14192   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14193   inst.instruction |= LOW4 (scalar);
14194   inst.instruction |= HI1 (scalar) << 5;
14195   inst.instruction |= (et.type == NT_float) << 8;
14196   inst.instruction |= neon_logbits (et.size) << 20;
14197   inst.instruction |= (ubit != 0) << 24;
14198 
14199   neon_dp_fixup (&inst);
14200 }
14201 
14202 static void
14203 do_neon_mac_maybe_scalar (void)
14204 {
14205   if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14206     return;
14207 
14208   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14209     return;
14210 
14211   if (inst.operands[2].isscalar)
14212     {
14213       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14214       struct neon_type_el et = neon_check_type (3, rs,
14215         N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14216       NEON_ENCODE (SCALAR, inst);
14217       neon_mul_mac (et, neon_quad (rs));
14218     }
14219   else
14220     {
14221       /* The "untyped" case can't happen.  Do this to stop the "U" bit being
14222 	 affected if we specify unsigned args.  */
14223       neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14224     }
14225 }
14226 
14227 static void
14228 do_neon_fmac (void)
14229 {
14230   if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14231     return;
14232 
14233   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14234     return;
14235 
14236   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14237 }
14238 
14239 static void
14240 do_neon_tst (void)
14241 {
14242   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14243   struct neon_type_el et = neon_check_type (3, rs,
14244     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14245   neon_three_same (neon_quad (rs), 0, et.size);
14246 }
14247 
14248 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14249    same types as the MAC equivalents. The polynomial type for this instruction
14250    is encoded the same as the integer type.  */
14251 
14252 static void
14253 do_neon_mul (void)
14254 {
14255   if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14256     return;
14257 
14258   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14259     return;
14260 
14261   if (inst.operands[2].isscalar)
14262     do_neon_mac_maybe_scalar ();
14263   else
14264     neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14265 }
14266 
14267 static void
14268 do_neon_qdmulh (void)
14269 {
14270   if (inst.operands[2].isscalar)
14271     {
14272       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14273       struct neon_type_el et = neon_check_type (3, rs,
14274         N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14275       NEON_ENCODE (SCALAR, inst);
14276       neon_mul_mac (et, neon_quad (rs));
14277     }
14278   else
14279     {
14280       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14281       struct neon_type_el et = neon_check_type (3, rs,
14282         N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14283       NEON_ENCODE (INTEGER, inst);
14284       /* The U bit (rounding) comes from bit mask.  */
14285       neon_three_same (neon_quad (rs), 0, et.size);
14286     }
14287 }
14288 
14289 static void
14290 do_neon_fcmp_absolute (void)
14291 {
14292   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14293   neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14294   /* Size field comes from bit mask.  */
14295   neon_three_same (neon_quad (rs), 1, -1);
14296 }
14297 
14298 static void
14299 do_neon_fcmp_absolute_inv (void)
14300 {
14301   neon_exchange_operands ();
14302   do_neon_fcmp_absolute ();
14303 }
14304 
14305 static void
14306 do_neon_step (void)
14307 {
14308   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14309   neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14310   neon_three_same (neon_quad (rs), 0, -1);
14311 }
14312 
14313 static void
14314 do_neon_abs_neg (void)
14315 {
14316   enum neon_shape rs;
14317   struct neon_type_el et;
14318 
14319   if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14320     return;
14321 
14322   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14323     return;
14324 
14325   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14326   et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14327 
14328   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14329   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14330   inst.instruction |= LOW4 (inst.operands[1].reg);
14331   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14332   inst.instruction |= neon_quad (rs) << 6;
14333   inst.instruction |= (et.type == NT_float) << 10;
14334   inst.instruction |= neon_logbits (et.size) << 18;
14335 
14336   neon_dp_fixup (&inst);
14337 }
14338 
14339 static void
14340 do_neon_sli (void)
14341 {
14342   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14343   struct neon_type_el et = neon_check_type (2, rs,
14344     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14345   int imm = inst.operands[2].imm;
14346   constraint (imm < 0 || (unsigned)imm >= et.size,
14347               _("immediate out of range for insert"));
14348   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14349 }
14350 
14351 static void
14352 do_neon_sri (void)
14353 {
14354   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14355   struct neon_type_el et = neon_check_type (2, rs,
14356     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14357   int imm = inst.operands[2].imm;
14358   constraint (imm < 1 || (unsigned)imm > et.size,
14359               _("immediate out of range for insert"));
14360   neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14361 }
14362 
14363 static void
14364 do_neon_qshlu_imm (void)
14365 {
14366   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14367   struct neon_type_el et = neon_check_type (2, rs,
14368     N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14369   int imm = inst.operands[2].imm;
14370   constraint (imm < 0 || (unsigned)imm >= et.size,
14371               _("immediate out of range for shift"));
14372   /* Only encodes the 'U present' variant of the instruction.
14373      In this case, signed types have OP (bit 8) set to 0.
14374      Unsigned types have OP set to 1.  */
14375   inst.instruction |= (et.type == NT_unsigned) << 8;
14376   /* The rest of the bits are the same as other immediate shifts.  */
14377   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14378 }
14379 
14380 static void
14381 do_neon_qmovn (void)
14382 {
14383   struct neon_type_el et = neon_check_type (2, NS_DQ,
14384     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14385   /* Saturating move where operands can be signed or unsigned, and the
14386      destination has the same signedness.  */
14387   NEON_ENCODE (INTEGER, inst);
14388   if (et.type == NT_unsigned)
14389     inst.instruction |= 0xc0;
14390   else
14391     inst.instruction |= 0x80;
14392   neon_two_same (0, 1, et.size / 2);
14393 }
14394 
14395 static void
14396 do_neon_qmovun (void)
14397 {
14398   struct neon_type_el et = neon_check_type (2, NS_DQ,
14399     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14400   /* Saturating move with unsigned results. Operands must be signed.  */
14401   NEON_ENCODE (INTEGER, inst);
14402   neon_two_same (0, 1, et.size / 2);
14403 }
14404 
14405 static void
14406 do_neon_rshift_sat_narrow (void)
14407 {
14408   /* FIXME: Types for narrowing. If operands are signed, results can be signed
14409      or unsigned. If operands are unsigned, results must also be unsigned.  */
14410   struct neon_type_el et = neon_check_type (2, NS_DQI,
14411     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14412   int imm = inst.operands[2].imm;
14413   /* This gets the bounds check, size encoding and immediate bits calculation
14414      right.  */
14415   et.size /= 2;
14416 
14417   /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14418      VQMOVN.I<size> <Dd>, <Qm>.  */
14419   if (imm == 0)
14420     {
14421       inst.operands[2].present = 0;
14422       inst.instruction = N_MNEM_vqmovn;
14423       do_neon_qmovn ();
14424       return;
14425     }
14426 
14427   constraint (imm < 1 || (unsigned)imm > et.size,
14428               _("immediate out of range"));
14429   neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14430 }
14431 
14432 static void
14433 do_neon_rshift_sat_narrow_u (void)
14434 {
14435   /* FIXME: Types for narrowing. If operands are signed, results can be signed
14436      or unsigned. If operands are unsigned, results must also be unsigned.  */
14437   struct neon_type_el et = neon_check_type (2, NS_DQI,
14438     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14439   int imm = inst.operands[2].imm;
14440   /* This gets the bounds check, size encoding and immediate bits calculation
14441      right.  */
14442   et.size /= 2;
14443 
14444   /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14445      VQMOVUN.I<size> <Dd>, <Qm>.  */
14446   if (imm == 0)
14447     {
14448       inst.operands[2].present = 0;
14449       inst.instruction = N_MNEM_vqmovun;
14450       do_neon_qmovun ();
14451       return;
14452     }
14453 
14454   constraint (imm < 1 || (unsigned)imm > et.size,
14455               _("immediate out of range"));
14456   /* FIXME: The manual is kind of unclear about what value U should have in
14457      VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14458      must be 1.  */
14459   neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14460 }
14461 
14462 static void
14463 do_neon_movn (void)
14464 {
14465   struct neon_type_el et = neon_check_type (2, NS_DQ,
14466     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14467   NEON_ENCODE (INTEGER, inst);
14468   neon_two_same (0, 1, et.size / 2);
14469 }
14470 
14471 static void
14472 do_neon_rshift_narrow (void)
14473 {
14474   struct neon_type_el et = neon_check_type (2, NS_DQI,
14475     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14476   int imm = inst.operands[2].imm;
14477   /* This gets the bounds check, size encoding and immediate bits calculation
14478      right.  */
14479   et.size /= 2;
14480 
14481   /* If the immediate is zero then this is a pseudo-instruction for
14482      VMOVN.I<size> <Dd>, <Qm>.  */
14483   if (imm == 0)
14484     {
14485       inst.operands[2].present = 0;
14486       inst.instruction = N_MNEM_vmovn;
14487       do_neon_movn ();
14488       return;
14489     }
14490 
14491   constraint (imm < 1 || (unsigned)imm > et.size,
14492               _("immediate out of range for narrowing operation"));
14493   neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14494 }
14495 
14496 static void
14497 do_neon_shll (void)
14498 {
14499   /* FIXME: Type checking when lengthening.  */
14500   struct neon_type_el et = neon_check_type (2, NS_QDI,
14501     N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14502   unsigned imm = inst.operands[2].imm;
14503 
14504   if (imm == et.size)
14505     {
14506       /* Maximum shift variant.  */
14507       NEON_ENCODE (INTEGER, inst);
14508       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14509       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14510       inst.instruction |= LOW4 (inst.operands[1].reg);
14511       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14512       inst.instruction |= neon_logbits (et.size) << 18;
14513 
14514       neon_dp_fixup (&inst);
14515     }
14516   else
14517     {
14518       /* A more-specific type check for non-max versions.  */
14519       et = neon_check_type (2, NS_QDI,
14520         N_EQK | N_DBL, N_SU_32 | N_KEY);
14521       NEON_ENCODE (IMMED, inst);
14522       neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14523     }
14524 }
14525 
14526 /* Check the various types for the VCVT instruction, and return which version
14527    the current instruction is.  */
14528 
14529 #define CVT_FLAVOUR_VAR							      \
14530   CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
14531   CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
14532   CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
14533   CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
14534   /* Half-precision conversions.  */					      \
14535   CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
14536   CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
14537   /* VFP instructions.  */						      \
14538   CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
14539   CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
14540   CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
14541   CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
14542   CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
14543   CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
14544   /* VFP instructions with bitshift.  */				      \
14545   CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
14546   CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
14547   CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
14548   CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
14549   CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
14550   CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
14551   CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
14552   CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
14553 
14554 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
14555   neon_cvt_flavour_##C,
14556 
14557 /* The different types of conversions we can do.  */
14558 enum neon_cvt_flavour
14559 {
14560   CVT_FLAVOUR_VAR
14561   neon_cvt_flavour_invalid,
14562   neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
14563 };
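/* Sketch of the X-macro expansion above: with the CVT_VAR definition just
   before the enum, each CVT_FLAVOUR_VAR entry becomes one enumerator (the
   first line yields neon_cvt_flavour_s32_f32, and so on).  The same list is
   re-expanded below with other CVT_VAR definitions to build the per-flavour
   opcode-name tables and the type checks in get_neon_cvt_flavour.  */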
14564 
14565 #undef CVT_VAR
14566 
14567 static enum neon_cvt_flavour
14568 get_neon_cvt_flavour (enum neon_shape rs)
14569 {
14570 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
14571   et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
14572   if (et.type != NT_invtype)				\
14573     {							\
14574       inst.error = NULL;				\
14575       return (neon_cvt_flavour_##C);			\
14576     }
14577 
14578   struct neon_type_el et;
14579   unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14580                         || rs == NS_FF) ? N_VFP : 0;
14581   /* The instruction versions which take an immediate take one register
14582      argument, which is extended to the width of the full register. Thus the
14583      "source" and "destination" registers must have the same width.  Hack that
14584      here by making the size equal to the key (wider, in this case) operand.  */
14585   unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14586 
14587   CVT_FLAVOUR_VAR;
14588 
14589   return neon_cvt_flavour_invalid;
14590 #undef CVT_VAR
14591 }
14592 
14593 enum neon_cvt_mode
14594 {
14595   neon_cvt_mode_a,
14596   neon_cvt_mode_n,
14597   neon_cvt_mode_p,
14598   neon_cvt_mode_m,
14599   neon_cvt_mode_z,
14600   neon_cvt_mode_x,
14601   neon_cvt_mode_r
14602 };
14603 
14604 /* Neon-syntax VFP conversions.  */
14605 
14606 static void
14607 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
14608 {
14609   const char *opname = 0;
14610 
14611   if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14612     {
14613       /* Conversions with immediate bitshift.  */
14614       const char *enc[] =
14615         {
14616 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
14617 	  CVT_FLAVOUR_VAR
14618 	  NULL
14619 #undef CVT_VAR
14620         };
14621 
14622       if (flavour < (int) ARRAY_SIZE (enc))
14623         {
14624           opname = enc[flavour];
14625           constraint (inst.operands[0].reg != inst.operands[1].reg,
14626                       _("operands 0 and 1 must be the same register"));
14627           inst.operands[1] = inst.operands[2];
14628           memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14629         }
14630     }
14631   else
14632     {
14633       /* Conversions without bitshift.  */
14634       const char *enc[] =
14635         {
14636 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
14637 	  CVT_FLAVOUR_VAR
14638 	  NULL
14639 #undef CVT_VAR
14640         };
14641 
14642       if (flavour < (int) ARRAY_SIZE (enc))
14643         opname = enc[flavour];
14644     }
14645 
14646   if (opname)
14647     do_vfp_nsyn_opcode (opname);
14648 }
14649 
14650 static void
14651 do_vfp_nsyn_cvtz (void)
14652 {
14653   enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14654   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14655   const char *enc[] =
14656     {
14657 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
14658       CVT_FLAVOUR_VAR
14659       NULL
14660 #undef CVT_VAR
14661     };
14662 
14663   if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14664     do_vfp_nsyn_opcode (enc[flavour]);
14665 }
14666 
14667 static void
14668 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
14669 		      enum neon_cvt_mode mode)
14670 {
14671   int sz, op;
14672   int rm;
14673 
14674   set_it_insn_type (OUTSIDE_IT_INSN);
14675 
14676   switch (flavour)
14677     {
14678     case neon_cvt_flavour_s32_f64:
14679       sz = 1;
14680       op = 1;
14681       break;
14682     case neon_cvt_flavour_s32_f32:
14683       sz = 0;
14684       op = 1;
14685       break;
14686     case neon_cvt_flavour_u32_f64:
14687       sz = 1;
14688       op = 0;
14689       break;
14690     case neon_cvt_flavour_u32_f32:
14691       sz = 0;
14692       op = 0;
14693       break;
14694     default:
14695       first_error (_("invalid instruction shape"));
14696       return;
14697     }
14698 
14699   switch (mode)
14700     {
14701     case neon_cvt_mode_a: rm = 0; break;
14702     case neon_cvt_mode_n: rm = 1; break;
14703     case neon_cvt_mode_p: rm = 2; break;
14704     case neon_cvt_mode_m: rm = 3; break;
14705     default: first_error (_("invalid rounding mode")); return;
14706     }
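  /* Worked example (illustrative): "vcvtm.u32.f64 s0, d1" selects flavour
     u32_f64 and mode neon_cvt_mode_m, giving sz = 1, op = 0, rm = 3, which
     are merged into the encoding below.  */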
14707 
14708   NEON_ENCODE (FPV8, inst);
14709   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14710   encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
14711   inst.instruction |= sz << 8;
14712   inst.instruction |= op << 7;
14713   inst.instruction |= rm << 16;
14714   inst.instruction |= 0xf0000000;
14715   inst.is_neon = TRUE;
14716 }
14717 
14718 static void
14719 do_neon_cvt_1 (enum neon_cvt_mode mode)
14720 {
14721   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14722     NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14723   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14724 
14725   /* PR11109: Handle round-to-zero for VCVT conversions.  */
14726   if (mode == neon_cvt_mode_z
14727       && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14728       && (flavour == neon_cvt_flavour_s32_f32
14729 	  || flavour == neon_cvt_flavour_u32_f32
14730 	  || flavour == neon_cvt_flavour_s32_f64
14731 	  || flavour == neon_cvt_flavour_u32_f64)
14732       && (rs == NS_FD || rs == NS_FF))
14733     {
14734       do_vfp_nsyn_cvtz ();
14735       return;
14736     }
14737 
14738   /* VFP rather than Neon conversions.  */
14739   if (flavour >= neon_cvt_flavour_first_fp)
14740     {
14741       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
14742 	do_vfp_nsyn_cvt (rs, flavour);
14743       else
14744 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
14745 
14746       return;
14747     }
14748 
14749   switch (rs)
14750     {
14751     case NS_DDI:
14752     case NS_QQI:
14753       {
14754         unsigned immbits;
14755         unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14756 
14757         if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14758           return;
14759 
14760         /* Fixed-point conversion with #0 immediate is encoded as an
14761            integer conversion.  */
14762         if (inst.operands[2].present && inst.operands[2].imm == 0)
14763           goto int_encode;
14764         immbits = 32 - inst.operands[2].imm;
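        /* Worked example (illustrative): "vcvt.s32.f32 d0, d1, #16" gives
           immbits = 32 - 16 = 16; combined with the 1 << 21 set below, this
           forms the usual 64-minus-fraction-bits immediate field.  */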
14765         NEON_ENCODE (IMMED, inst);
14766         if (flavour != neon_cvt_flavour_invalid)
14767           inst.instruction |= enctab[flavour];
14768         inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14769         inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14770         inst.instruction |= LOW4 (inst.operands[1].reg);
14771         inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14772         inst.instruction |= neon_quad (rs) << 6;
14773         inst.instruction |= 1 << 21;
14774         inst.instruction |= immbits << 16;
14775 
14776         neon_dp_fixup (&inst);
14777       }
14778       break;
14779 
14780     case NS_DD:
14781     case NS_QQ:
14782       if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
14783 	{
14784 	  NEON_ENCODE (FLOAT, inst);
14785 	  set_it_insn_type (OUTSIDE_IT_INSN);
14786 
14787 	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
14788 	    return;
14789 
14790 	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14791 	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14792 	  inst.instruction |= LOW4 (inst.operands[1].reg);
14793 	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14794 	  inst.instruction |= neon_quad (rs) << 6;
14795 	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
14796 	  inst.instruction |= mode << 8;
14797 	  if (thumb_mode)
14798 	    inst.instruction |= 0xfc000000;
14799 	  else
14800 	    inst.instruction |= 0xf0000000;
14801 	}
14802       else
14803 	{
14804     int_encode:
14805 	  {
14806 	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14807 
14808 	    NEON_ENCODE (INTEGER, inst);
14809 
14810 	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14811 	      return;
14812 
14813 	    if (flavour != neon_cvt_flavour_invalid)
14814 	      inst.instruction |= enctab[flavour];
14815 
14816 	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14817 	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14818 	    inst.instruction |= LOW4 (inst.operands[1].reg);
14819 	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14820 	    inst.instruction |= neon_quad (rs) << 6;
14821 	    inst.instruction |= 2 << 18;
14822 
14823 	    neon_dp_fixup (&inst);
14824 	  }
14825 	}
14826       break;
14827 
14828     /* Half-precision conversions for Advanced SIMD -- neon.  */
14829     case NS_QD:
14830     case NS_DQ:
14831 
14832       if ((rs == NS_DQ)
14833 	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14834 	  {
14835 	    as_bad (_("operand size must match register width"));
14836 	    break;
14837 	  }
14838 
14839       if ((rs == NS_QD)
14840 	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14841 	  {
14842 	    as_bad (_("operand size must match register width"));
14843 	    break;
14844 	  }
14845 
14846       if (rs == NS_DQ)
14847         inst.instruction = 0x3b60600;
14848       else
14849 	inst.instruction = 0x3b60700;
14850 
14851       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14852       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14853       inst.instruction |= LOW4 (inst.operands[1].reg);
14854       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14855       neon_dp_fixup (&inst);
14856       break;
14857 
14858     default:
14859       /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
14860       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
14861 	do_vfp_nsyn_cvt (rs, flavour);
14862       else
14863 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
14864     }
14865 }
14866 
14867 static void
14868 do_neon_cvtr (void)
14869 {
14870   do_neon_cvt_1 (neon_cvt_mode_x);
14871 }
14872 
14873 static void
14874 do_neon_cvt (void)
14875 {
14876   do_neon_cvt_1 (neon_cvt_mode_z);
14877 }
14878 
14879 static void
14880 do_neon_cvta (void)
14881 {
14882   do_neon_cvt_1 (neon_cvt_mode_a);
14883 }
14884 
14885 static void
14886 do_neon_cvtn (void)
14887 {
14888   do_neon_cvt_1 (neon_cvt_mode_n);
14889 }
14890 
14891 static void
14892 do_neon_cvtp (void)
14893 {
14894   do_neon_cvt_1 (neon_cvt_mode_p);
14895 }
14896 
14897 static void
14898 do_neon_cvtm (void)
14899 {
14900   do_neon_cvt_1 (neon_cvt_mode_m);
14901 }
14902 
14903 static void
14904 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
14905 {
14906   if (is_double)
14907     mark_feature_used (&fpu_vfp_ext_armv8);
14908 
14909   encode_arm_vfp_reg (inst.operands[0].reg,
14910 		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
14911   encode_arm_vfp_reg (inst.operands[1].reg,
14912 		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
14913   inst.instruction |= to ? 0x10000 : 0;
14914   inst.instruction |= t ? 0x80 : 0;
14915   inst.instruction |= is_double ? 0x100 : 0;
14916   do_vfp_cond_or_thumb ();
14917 }
14918 
14919 static void
14920 do_neon_cvttb_1 (bfd_boolean t)
14921 {
14922   enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
14923 
14924   if (rs == NS_NULL)
14925     return;
14926   else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
14927     {
14928       inst.error = NULL;
14929       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
14930     }
14931   else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
14932     {
14933       inst.error = NULL;
14934       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
14935     }
14936   else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
14937     {
14938       inst.error = NULL;
14939       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
14940     }
14941   else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
14942     {
14943       inst.error = NULL;
14944       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
14945     }
14946   else
14947     return;
14948 }
14949 
14950 static void
14951 do_neon_cvtb (void)
14952 {
14953   do_neon_cvttb_1 (FALSE);
14954 }
14955 
14956 
14957 static void
14958 do_neon_cvtt (void)
14959 {
14960   do_neon_cvttb_1 (TRUE);
14961 }
14962 
14963 static void
14964 neon_move_immediate (void)
14965 {
14966   enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14967   struct neon_type_el et = neon_check_type (2, rs,
14968     N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14969   unsigned immlo, immhi = 0, immbits;
14970   int op, cmode, float_p;
14971 
14972   constraint (et.type == NT_invtype,
14973               _("operand size must be specified for immediate VMOV"));
14974 
14975   /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
14976   op = (inst.instruction & (1 << 5)) != 0;
14977 
14978   immlo = inst.operands[1].imm;
14979   if (inst.operands[1].regisimm)
14980     immhi = inst.operands[1].reg;
14981 
14982   constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14983               _("immediate has bits set outside the operand size"));
14984 
14985   float_p = inst.operands[1].immisfloat;
14986 
14987   if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14988                                         et.size, et.type)) == FAIL)
14989     {
14990       /* Invert relevant bits only.  */
14991       neon_invert_size (&immlo, &immhi, et.size);
14992       /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14993          with one or the other; those cases are caught by
14994          neon_cmode_for_move_imm.  */
14995       op = !op;
14996       if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14997 					    &op, et.size, et.type)) == FAIL)
14998         {
14999           first_error (_("immediate out of range"));
15000           return;
15001         }
15002     }
15003 
15004   inst.instruction &= ~(1 << 5);
15005   inst.instruction |= op << 5;
15006 
15007   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15008   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15009   inst.instruction |= neon_quad (rs) << 6;
15010   inst.instruction |= cmode << 8;
15011 
15012   neon_write_immbits (immbits);
15013 }
15014 
15015 static void
15016 do_neon_mvn (void)
15017 {
15018   if (inst.operands[1].isreg)
15019     {
15020       enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15021 
15022       NEON_ENCODE (INTEGER, inst);
15023       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15024       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15025       inst.instruction |= LOW4 (inst.operands[1].reg);
15026       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15027       inst.instruction |= neon_quad (rs) << 6;
15028     }
15029   else
15030     {
15031       NEON_ENCODE (IMMED, inst);
15032       neon_move_immediate ();
15033     }
15034 
15035   neon_dp_fixup (&inst);
15036 }
15037 
15038 /* Encode instructions of form:
15039 
15040   |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
15041   |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
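/* Worked example (illustrative): with Dd = d17, LOW4 (17) = 1 goes into bits
   15-12 and HI1 (17) = 1 into the D bit (22); the other registers are split
   into their Rn/N and Rm/M fields the same way by the shifts below.  */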
15042 
15043 static void
15044 neon_mixed_length (struct neon_type_el et, unsigned size)
15045 {
15046   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15047   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15048   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15049   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15050   inst.instruction |= LOW4 (inst.operands[2].reg);
15051   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15052   inst.instruction |= (et.type == NT_unsigned) << 24;
15053   inst.instruction |= neon_logbits (size) << 20;
15054 
15055   neon_dp_fixup (&inst);
15056 }
15057 
15058 static void
15059 do_neon_dyadic_long (void)
15060 {
15061   /* FIXME: Type checking for lengthening op.  */
15062   struct neon_type_el et = neon_check_type (3, NS_QDD,
15063     N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15064   neon_mixed_length (et, et.size);
15065 }
15066 
15067 static void
15068 do_neon_abal (void)
15069 {
15070   struct neon_type_el et = neon_check_type (3, NS_QDD,
15071     N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15072   neon_mixed_length (et, et.size);
15073 }
15074 
15075 static void
15076 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15077 {
15078   if (inst.operands[2].isscalar)
15079     {
15080       struct neon_type_el et = neon_check_type (3, NS_QDS,
15081         N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15082       NEON_ENCODE (SCALAR, inst);
15083       neon_mul_mac (et, et.type == NT_unsigned);
15084     }
15085   else
15086     {
15087       struct neon_type_el et = neon_check_type (3, NS_QDD,
15088         N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15089       NEON_ENCODE (INTEGER, inst);
15090       neon_mixed_length (et, et.size);
15091     }
15092 }
15093 
15094 static void
15095 do_neon_mac_maybe_scalar_long (void)
15096 {
15097   neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
15098 }
15099 
15100 static void
15101 do_neon_dyadic_wide (void)
15102 {
15103   struct neon_type_el et = neon_check_type (3, NS_QQD,
15104     N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15105   neon_mixed_length (et, et.size);
15106 }
15107 
15108 static void
15109 do_neon_dyadic_narrow (void)
15110 {
15111   struct neon_type_el et = neon_check_type (3, NS_QDD,
15112     N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15113   /* Operand sign is unimportant, and the U bit is part of the opcode,
15114      so force the operand type to integer.  */
15115   et.type = NT_integer;
15116   neon_mixed_length (et, et.size / 2);
15117 }
15118 
15119 static void
15120 do_neon_mul_sat_scalar_long (void)
15121 {
15122   neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
15123 }
15124 
15125 static void
15126 do_neon_vmull (void)
15127 {
15128   if (inst.operands[2].isscalar)
15129     do_neon_mac_maybe_scalar_long ();
15130   else
15131     {
15132       struct neon_type_el et = neon_check_type (3, NS_QDD,
15133         N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
15134 
15135       if (et.type == NT_poly)
15136         NEON_ENCODE (POLY, inst);
15137       else
15138         NEON_ENCODE (INTEGER, inst);
15139 
15140       /* For polynomial encoding the U bit must be zero, and the size must
15141 	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
15142 	 non-obviously, as 0b10).  */
15143       if (et.size == 64)
15144 	{
15145 	  /* Check we're on the correct architecture.  */
15146 	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
15147 	    inst.error =
15148 	      _("Instruction form not available on this architecture.");
15149 
15150 	  et.size = 32;
15151 	}
15152 
15153       neon_mixed_length (et, et.size);
15154     }
15155 }
15156 
15157 static void
15158 do_neon_ext (void)
15159 {
15160   enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15161   struct neon_type_el et = neon_check_type (3, rs,
15162     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15163   unsigned imm = (inst.operands[3].imm * et.size) / 8;
15164 
15165   constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15166 	      _("shift out of range"));
15167   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15168   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15169   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15170   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15171   inst.instruction |= LOW4 (inst.operands[2].reg);
15172   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15173   inst.instruction |= neon_quad (rs) << 6;
15174   inst.instruction |= imm << 8;
15175 
15176   neon_dp_fixup (&inst);
15177 }
15178 
15179 static void
15180 do_neon_rev (void)
15181 {
15182   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15183   struct neon_type_el et = neon_check_type (2, rs,
15184     N_EQK, N_8 | N_16 | N_32 | N_KEY);
15185   unsigned op = (inst.instruction >> 7) & 3;
15186   /* N (width of reversed regions) is encoded as part of the bitmask. We
15187      extract it here to check the elements to be reversed are smaller.
15188      Otherwise we'd get a reserved instruction.  */
15189   unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15190   gas_assert (elsize != 0);
15191   constraint (et.size >= elsize,
15192               _("elements must be smaller than reversal region"));
15193   neon_two_same (neon_quad (rs), 1, et.size);
15194 }
15195 
15196 static void
15197 do_neon_dup (void)
15198 {
15199   if (inst.operands[1].isscalar)
15200     {
15201       enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
15202       struct neon_type_el et = neon_check_type (2, rs,
15203         N_EQK, N_8 | N_16 | N_32 | N_KEY);
15204       unsigned sizebits = et.size >> 3;
15205       unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
15206       int logsize = neon_logbits (et.size);
15207       unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
15208 
15209       if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
15210         return;
15211 
15212       NEON_ENCODE (SCALAR, inst);
15213       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15214       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15215       inst.instruction |= LOW4 (dm);
15216       inst.instruction |= HI1 (dm) << 5;
15217       inst.instruction |= neon_quad (rs) << 6;
15218       inst.instruction |= x << 17;
15219       inst.instruction |= sizebits << 16;
15220 
15221       neon_dp_fixup (&inst);
15222     }
15223   else
15224     {
15225       enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
15226       struct neon_type_el et = neon_check_type (2, rs,
15227         N_8 | N_16 | N_32 | N_KEY, N_EQK);
15228       /* Duplicate ARM register to lanes of vector.  */
15229       NEON_ENCODE (ARMREG, inst);
15230       switch (et.size)
15231         {
15232         case 8:  inst.instruction |= 0x400000; break;
15233         case 16: inst.instruction |= 0x000020; break;
15234         case 32: inst.instruction |= 0x000000; break;
15235         default: break;
15236         }
15237       inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15238       inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
15239       inst.instruction |= HI1 (inst.operands[0].reg) << 7;
15240       inst.instruction |= neon_quad (rs) << 21;
15241       /* The encoding for this instruction is identical for the ARM and Thumb
15242          variants, except for the condition field.  */
15243       do_vfp_cond_or_thumb ();
15244     }
15245 }
15246 
15247 /* VMOV has particularly many variations. It can be one of:
15248      0. VMOV<c><q> <Qd>, <Qm>
15249      1. VMOV<c><q> <Dd>, <Dm>
15250    (Register operations, which are VORR with Rm = Rn.)
15251      2. VMOV<c><q>.<dt> <Qd>, #<imm>
15252      3. VMOV<c><q>.<dt> <Dd>, #<imm>
15253    (Immediate loads.)
15254      4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15255    (ARM register to scalar.)
15256      5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15257    (Two ARM registers to vector.)
15258      6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15259    (Scalar to ARM register.)
15260      7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15261    (Vector to two ARM registers.)
15262      8. VMOV.F32 <Sd>, <Sm>
15263      9. VMOV.F64 <Dd>, <Dm>
15264    (VFP register moves.)
15265     10. VMOV.F32 <Sd>, #imm
15266     11. VMOV.F64 <Dd>, #imm
15267    (VFP float immediate load.)
15268     12. VMOV <Rd>, <Sm>
15269    (VFP single to ARM reg.)
15270     13. VMOV <Sd>, <Rm>
15271    (ARM reg to VFP single.)
15272     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15273    (Two ARM regs to two VFP singles.)
15274     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15275    (Two VFP singles to two ARM regs.)
15276 
15277    These cases can be disambiguated using neon_select_shape, except cases 1/9
15278    and 3/11 which depend on the operand type too.
15279 
15280    All the encoded bits are hardcoded by this function.
15281 
15282    Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15283    Cases 5, 7 may be used with VFPv2 and above.
15284 
15285    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15286    can specify a type where it doesn't make sense to, and is ignored).  */
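/* A few concrete instances of the cases above (illustrative only):
     vmov q0, q1            -- case 0
     vmov.i32 d0, #0x12     -- case 3
     vmov.32 d0[1], r2      -- case 4
     vmov r0, r1, d2        -- case 7
     vmov.f32 s0, #1.0      -- case 10
     vmov r0, s1            -- case 12.  */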
15287 
15288 static void
15289 do_neon_mov (void)
15290 {
15291   enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15292     NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15293     NS_NULL);
15294   struct neon_type_el et;
15295   const char *ldconst = 0;
15296 
15297   switch (rs)
15298     {
15299     case NS_DD:  /* case 1/9.  */
15300       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15301       /* It is not an error here if no type is given.  */
15302       inst.error = NULL;
15303       if (et.type == NT_float && et.size == 64)
15304         {
15305           do_vfp_nsyn_opcode ("fcpyd");
15306           break;
15307         }
15308       /* fall through.  */
15309 
15310     case NS_QQ:  /* case 0/1.  */
15311       {
15312         if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15313           return;
15314         /* The architecture manual I have doesn't explicitly state which
15315            value the U bit should have for register->register moves, but
15316            the equivalent VORR instruction has U = 0, so do that.  */
15317         inst.instruction = 0x0200110;
15318         inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15319         inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15320         inst.instruction |= LOW4 (inst.operands[1].reg);
15321         inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15322         inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15323         inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15324         inst.instruction |= neon_quad (rs) << 6;
15325 
15326         neon_dp_fixup (&inst);
15327       }
15328       break;
15329 
15330     case NS_DI:  /* case 3/11.  */
15331       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15332       inst.error = NULL;
15333       if (et.type == NT_float && et.size == 64)
15334         {
15335           /* case 11 (fconstd).  */
15336           ldconst = "fconstd";
15337           goto encode_fconstd;
15338         }
15339       /* fall through.  */
15340 
15341     case NS_QI:  /* case 2/3.  */
15342       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15343         return;
15344       inst.instruction = 0x0800010;
15345       neon_move_immediate ();
15346       neon_dp_fixup (&inst);
15347       break;
15348 
15349     case NS_SR:  /* case 4.  */
15350       {
15351         unsigned bcdebits = 0;
15352         int logsize;
15353         unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15354         unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15355 
15356         et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15357         logsize = neon_logbits (et.size);
15358 
15359         constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15360                     _(BAD_FPU));
15361         constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15362                     && et.size != 32, _(BAD_FPU));
15363         constraint (et.type == NT_invtype, _("bad type for scalar"));
15364         constraint (x >= 64 / et.size, _("scalar index out of range"));
15365 
15366         switch (et.size)
15367           {
15368           case 8:  bcdebits = 0x8; break;
15369           case 16: bcdebits = 0x1; break;
15370           case 32: bcdebits = 0x0; break;
15371           default: ;
15372           }
15373 
15374         bcdebits |= x << logsize;
15375 
15376         inst.instruction = 0xe000b10;
15377         do_vfp_cond_or_thumb ();
15378         inst.instruction |= LOW4 (dn) << 16;
15379         inst.instruction |= HI1 (dn) << 7;
15380         inst.instruction |= inst.operands[1].reg << 12;
15381         inst.instruction |= (bcdebits & 3) << 5;
15382         inst.instruction |= (bcdebits >> 2) << 21;
15383       }
15384       break;
15385 
15386     case NS_DRR:  /* case 5 (fmdrr).  */
15387       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15388                   _(BAD_FPU));
15389 
15390       inst.instruction = 0xc400b10;
15391       do_vfp_cond_or_thumb ();
15392       inst.instruction |= LOW4 (inst.operands[0].reg);
15393       inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15394       inst.instruction |= inst.operands[1].reg << 12;
15395       inst.instruction |= inst.operands[2].reg << 16;
15396       break;
15397 
15398     case NS_RS:  /* case 6.  */
15399       {
15400         unsigned logsize;
15401         unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15402         unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15403         unsigned abcdebits = 0;
15404 
15405 	et = neon_check_type (2, NS_NULL,
15406 			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15407         logsize = neon_logbits (et.size);
15408 
15409         constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15410                     _(BAD_FPU));
15411         constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15412                     && et.size != 32, _(BAD_FPU));
15413         constraint (et.type == NT_invtype, _("bad type for scalar"));
15414         constraint (x >= 64 / et.size, _("scalar index out of range"));
15415 
15416         switch (et.size)
15417           {
15418           case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15419           case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15420           case 32: abcdebits = 0x00; break;
15421           default: ;
15422           }
15423 
15424         abcdebits |= x << logsize;
15425         inst.instruction = 0xe100b10;
15426         do_vfp_cond_or_thumb ();
15427         inst.instruction |= LOW4 (dn) << 16;
15428         inst.instruction |= HI1 (dn) << 7;
15429         inst.instruction |= inst.operands[0].reg << 12;
15430         inst.instruction |= (abcdebits & 3) << 5;
15431         inst.instruction |= (abcdebits >> 2) << 21;
15432       }
15433       break;
15434 
15435     case NS_RRD:  /* case 7 (fmrrd).  */
15436       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15437                   _(BAD_FPU));
15438 
15439       inst.instruction = 0xc500b10;
15440       do_vfp_cond_or_thumb ();
15441       inst.instruction |= inst.operands[0].reg << 12;
15442       inst.instruction |= inst.operands[1].reg << 16;
15443       inst.instruction |= LOW4 (inst.operands[2].reg);
15444       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15445       break;
15446 
15447     case NS_FF:  /* case 8 (fcpys).  */
15448       do_vfp_nsyn_opcode ("fcpys");
15449       break;
15450 
15451     case NS_FI:  /* case 10 (fconsts).  */
15452       ldconst = "fconsts";
15453       encode_fconstd:
15454       if (is_quarter_float (inst.operands[1].imm))
15455         {
15456           inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15457           do_vfp_nsyn_opcode (ldconst);
15458         }
15459       else
15460         first_error (_("immediate out of range"));
15461       break;
15462 
15463     case NS_RF:  /* case 12 (fmrs).  */
15464       do_vfp_nsyn_opcode ("fmrs");
15465       break;
15466 
15467     case NS_FR:  /* case 13 (fmsr).  */
15468       do_vfp_nsyn_opcode ("fmsr");
15469       break;
15470 
15471     /* The encoders for the fmrrs and fmsrr instructions expect three operands
15472        (one of which is a list), but we have parsed four.  Do some fiddling to
15473        make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15474        expect.  */
15475     case NS_RRFF:  /* case 14 (fmrrs).  */
15476       constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15477                   _("VFP registers must be adjacent"));
15478       inst.operands[2].imm = 2;
15479       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15480       do_vfp_nsyn_opcode ("fmrrs");
15481       break;
15482 
15483     case NS_FFRR:  /* case 15 (fmsrr).  */
15484       constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15485                   _("VFP registers must be adjacent"));
15486       inst.operands[1] = inst.operands[2];
15487       inst.operands[2] = inst.operands[3];
15488       inst.operands[0].imm = 2;
15489       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15490       do_vfp_nsyn_opcode ("fmsrr");
15491       break;
15492 
15493     default:
15494       abort ();
15495     }
15496 }
15497 
15498 static void
15499 do_neon_rshift_round_imm (void)
15500 {
15501   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15502   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15503   int imm = inst.operands[2].imm;
15504 
15505   /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
15506   if (imm == 0)
15507     {
15508       inst.operands[2].present = 0;
15509       do_neon_mov ();
15510       return;
15511     }
15512 
15513   constraint (imm < 1 || (unsigned)imm > et.size,
15514               _("immediate out of range for shift"));
15515   neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15516                   et.size - imm);
15517 }
15518 
15519 static void
15520 do_neon_movl (void)
15521 {
15522   struct neon_type_el et = neon_check_type (2, NS_QD,
15523     N_EQK | N_DBL, N_SU_32 | N_KEY);
15524   unsigned sizebits = et.size >> 3;
15525   inst.instruction |= sizebits << 19;
15526   neon_two_same (0, et.type == NT_unsigned, -1);
15527 }
15528 
15529 static void
15530 do_neon_trn (void)
15531 {
15532   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15533   struct neon_type_el et = neon_check_type (2, rs,
15534     N_EQK, N_8 | N_16 | N_32 | N_KEY);
15535   NEON_ENCODE (INTEGER, inst);
15536   neon_two_same (neon_quad (rs), 1, et.size);
15537 }
15538 
15539 static void
15540 do_neon_zip_uzp (void)
15541 {
15542   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15543   struct neon_type_el et = neon_check_type (2, rs,
15544     N_EQK, N_8 | N_16 | N_32 | N_KEY);
15545   if (rs == NS_DD && et.size == 32)
15546     {
15547       /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
15548       inst.instruction = N_MNEM_vtrn;
15549       do_neon_trn ();
15550       return;
15551     }
15552   neon_two_same (neon_quad (rs), 1, et.size);
15553 }
15554 
15555 static void
15556 do_neon_sat_abs_neg (void)
15557 {
15558   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15559   struct neon_type_el et = neon_check_type (2, rs,
15560     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15561   neon_two_same (neon_quad (rs), 1, et.size);
15562 }
15563 
15564 static void
15565 do_neon_pair_long (void)
15566 {
15567   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15568   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15569   /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
15570   /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
15571   neon_two_same (neon_quad (rs), 1, et.size);
15572 }
15573 
15574 static void
15575 do_neon_recip_est (void)
15576 {
15577   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15578   struct neon_type_el et = neon_check_type (2, rs,
15579     N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15580   inst.instruction |= (et.type == NT_float) << 8;
15581   neon_two_same (neon_quad (rs), 1, et.size);
15582 }
15583 
15584 static void
15585 do_neon_cls (void)
15586 {
15587   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15588   struct neon_type_el et = neon_check_type (2, rs,
15589     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15590   neon_two_same (neon_quad (rs), 1, et.size);
15591 }
15592 
15593 static void
15594 do_neon_clz (void)
15595 {
15596   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15597   struct neon_type_el et = neon_check_type (2, rs,
15598     N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15599   neon_two_same (neon_quad (rs), 1, et.size);
15600 }
15601 
15602 static void
15603 do_neon_cnt (void)
15604 {
15605   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15606   struct neon_type_el et = neon_check_type (2, rs,
15607     N_EQK | N_INT, N_8 | N_KEY);
15608   neon_two_same (neon_quad (rs), 1, et.size);
15609 }
15610 
15611 static void
15612 do_neon_swp (void)
15613 {
15614   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15615   neon_two_same (neon_quad (rs), 1, -1);
15616 }
15617 
15618 static void
15619 do_neon_tbl_tbx (void)
15620 {
15621   unsigned listlenbits;
15622   neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15623 
15624   if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15625     {
15626       first_error (_("bad list length for table lookup"));
15627       return;
15628     }
15629 
15630   listlenbits = inst.operands[1].imm - 1;
15631   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15632   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15633   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15634   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15635   inst.instruction |= LOW4 (inst.operands[2].reg);
15636   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15637   inst.instruction |= listlenbits << 8;
15638 
15639   neon_dp_fixup (&inst);
15640 }
15641 
15642 static void
15643 do_neon_ldm_stm (void)
15644 {
15645   /* P, U and L bits are part of bitmask.  */
15646   int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15647   unsigned offsetbits = inst.operands[1].imm * 2;
15648 
15649   if (inst.operands[1].issingle)
15650     {
15651       do_vfp_nsyn_ldm_stm (is_dbmode);
15652       return;
15653     }
15654 
15655   constraint (is_dbmode && !inst.operands[0].writeback,
15656               _("writeback (!) must be used for VLDMDB and VSTMDB"));
15657 
15658   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15659               _("register list must contain at least 1 and at most 16 "
15660                 "registers"));
15661 
15662   inst.instruction |= inst.operands[0].reg << 16;
15663   inst.instruction |= inst.operands[0].writeback << 21;
15664   inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15665   inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15666 
15667   inst.instruction |= offsetbits;
15668 
15669   do_vfp_cond_or_thumb ();
15670 }
15671 
15672 static void
15673 do_neon_ldr_str (void)
15674 {
15675   int is_ldr = (inst.instruction & (1 << 20)) != 0;
15676 
15677   /* Use of PC in vstr in ARM mode is deprecated in ARMv7,
15678      and is UNPREDICTABLE in Thumb mode.  */
15679   if (!is_ldr
15680       && inst.operands[1].reg == REG_PC
15681       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15682     {
15683       if (!thumb_mode && warn_on_deprecated)
15684 	as_warn (_("Use of PC here is deprecated"));
15685       else
15686 	inst.error = _("Use of PC here is UNPREDICTABLE");
15687     }
15688 
15689   if (inst.operands[0].issingle)
15690     {
15691       if (is_ldr)
15692         do_vfp_nsyn_opcode ("flds");
15693       else
15694         do_vfp_nsyn_opcode ("fsts");
15695     }
15696   else
15697     {
15698       if (is_ldr)
15699         do_vfp_nsyn_opcode ("fldd");
15700       else
15701         do_vfp_nsyn_opcode ("fstd");
15702     }
15703 }
15704 
15705 /* "interleave" version also handles non-interleaving register VLD1/VST1
15706    instructions.  */
15707 
15708 static void
15709 do_neon_ld_st_interleave (void)
15710 {
15711   struct neon_type_el et = neon_check_type (1, NS_NULL,
15712                                             N_8 | N_16 | N_32 | N_64);
15713   unsigned alignbits = 0;
15714   unsigned idx;
15715   /* The bits in this table go:
15716      0: register stride of one (0) or two (1)
15717      1,2: register list length minus one (i.e. lengths 1, 2, 3, 4).
15718      3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15719      We use -1 for invalid entries.  */
15720   const int typetable[] =
15721     {
15722       0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
15723        -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
15724        -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
15725        -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
15726     };
15727   int typebits;
15728 
15729   if (et.type == NT_invtype)
15730     return;
15731 
15732   if (inst.operands[1].immisalign)
15733     switch (inst.operands[1].imm >> 8)
15734       {
15735       case 64: alignbits = 1; break;
15736       case 128:
15737         if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15738 	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15739           goto bad_alignment;
15740         alignbits = 2;
15741         break;
15742       case 256:
15743         if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15744           goto bad_alignment;
15745         alignbits = 3;
15746         break;
15747       default:
15748       bad_alignment:
15749         first_error (_("bad alignment"));
15750         return;
15751       }
15752 
15753   inst.instruction |= alignbits << 4;
15754   inst.instruction |= neon_logbits (et.size) << 6;
15755 
15756   /* Bits [4:6] of the immediate in a list specifier encode register stride
15757      (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15758      VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15759      up the right value for "type" in a table based on this value and the given
15760      list style, then stick it back.  */
15761   idx = ((inst.operands[0].imm >> 4) & 7)
15762         | (((inst.instruction >> 8) & 3) << 3);
15763 
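  /* Worked example (illustrative): for "vld2.16 {d0, d1}, [r0]" the list has
     length two with a stride of one, so the low bits give 0x2, and <n> = 2
     contributes 1 << 3, i.e. idx = 0xa; typetable[0xa] is then 0x8, the VLD2
     "type" value for two single-spaced registers.  */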
15764   typebits = typetable[idx];
15765 
15766   constraint (typebits == -1, _("bad list type for instruction"));
15767 
15768   inst.instruction &= ~0xf00;
15769   inst.instruction |= typebits << 8;
15770 }
15771 
15772 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15773    *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15774    otherwise. The variable arguments are a list of pairs of legal (size, align)
15775    values, terminated with -1.  */
15776 
15777 static int
15778 neon_alignment_bit (int size, int align, int *do_align, ...)
15779 {
15780   va_list ap;
15781   int result = FAIL, thissize, thisalign;
15782 
15783   if (!inst.operands[1].immisalign)
15784     {
15785       *do_align = 0;
15786       return SUCCESS;
15787     }
15788 
15789   va_start (ap, do_align);
15790 
15791   do
15792     {
15793       thissize = va_arg (ap, int);
15794       if (thissize == -1)
15795         break;
15796       thisalign = va_arg (ap, int);
15797 
15798       if (size == thissize && align == thisalign)
15799         result = SUCCESS;
15800     }
15801   while (result != SUCCESS);
15802 
15803   va_end (ap);
15804 
15805   if (result == SUCCESS)
15806     *do_align = 1;
15807   else
15808     first_error (_("unsupported alignment for instruction"));
15809 
15810   return result;
15811 }
15812 
15813 static void
15814 do_neon_ld_st_lane (void)
15815 {
15816   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15817   int align_good, do_align = 0;
15818   int logsize = neon_logbits (et.size);
15819   int align = inst.operands[1].imm >> 8;
15820   int n = (inst.instruction >> 8) & 3;
15821   int max_el = 64 / et.size;
15822 
15823   if (et.type == NT_invtype)
15824     return;
15825 
15826   constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15827               _("bad list length"));
15828   constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15829               _("scalar index out of range"));
15830   constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15831               && et.size == 8,
15832               _("stride of 2 unavailable when element size is 8"));
15833 
15834   switch (n)
15835     {
15836     case 0:  /* VLD1 / VST1.  */
15837       align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15838                                        32, 32, -1);
15839       if (align_good == FAIL)
15840         return;
15841       if (do_align)
15842         {
15843           unsigned alignbits = 0;
15844           switch (et.size)
15845             {
15846             case 16: alignbits = 0x1; break;
15847             case 32: alignbits = 0x3; break;
15848             default: ;
15849             }
15850           inst.instruction |= alignbits << 4;
15851         }
15852       break;
15853 
15854     case 1:  /* VLD2 / VST2.  */
15855       align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15856                                        32, 64, -1);
15857       if (align_good == FAIL)
15858         return;
15859       if (do_align)
15860         inst.instruction |= 1 << 4;
15861       break;
15862 
15863     case 2:  /* VLD3 / VST3.  */
15864       constraint (inst.operands[1].immisalign,
15865                   _("can't use alignment with this instruction"));
15866       break;
15867 
15868     case 3:  /* VLD4 / VST4.  */
15869       align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15870                                        16, 64, 32, 64, 32, 128, -1);
15871       if (align_good == FAIL)
15872         return;
15873       if (do_align)
15874         {
15875           unsigned alignbits = 0;
15876           switch (et.size)
15877             {
15878             case 8:  alignbits = 0x1; break;
15879             case 16: alignbits = 0x1; break;
15880             case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15881             default: ;
15882             }
15883           inst.instruction |= alignbits << 4;
15884         }
15885       break;
15886 
15887     default: ;
15888     }
15889 
15890   /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
15891   if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15892     inst.instruction |= 1 << (4 + logsize);
15893 
15894   inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15895   inst.instruction |= logsize << 10;
15896 }
15897 
15898 /* Encode single n-element structure to all lanes VLD<n> instructions.  */
15899 
15900 static void
15901 do_neon_ld_dup (void)
15902 {
15903   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15904   int align_good, do_align = 0;
15905 
15906   if (et.type == NT_invtype)
15907     return;
15908 
15909   switch ((inst.instruction >> 8) & 3)
15910     {
15911     case 0:  /* VLD1.  */
15912       gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15913       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15914                                        &do_align, 16, 16, 32, 32, -1);
15915       if (align_good == FAIL)
15916         return;
15917       switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15918         {
15919         case 1: break;
15920         case 2: inst.instruction |= 1 << 5; break;
15921         default: first_error (_("bad list length")); return;
15922         }
15923       inst.instruction |= neon_logbits (et.size) << 6;
15924       break;
15925 
15926     case 1:  /* VLD2.  */
15927       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15928                                        &do_align, 8, 16, 16, 32, 32, 64, -1);
15929       if (align_good == FAIL)
15930         return;
15931       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15932                   _("bad list length"));
15933       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15934         inst.instruction |= 1 << 5;
15935       inst.instruction |= neon_logbits (et.size) << 6;
15936       break;
15937 
15938     case 2:  /* VLD3.  */
15939       constraint (inst.operands[1].immisalign,
15940                   _("can't use alignment with this instruction"));
15941       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15942                   _("bad list length"));
15943       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15944         inst.instruction |= 1 << 5;
15945       inst.instruction |= neon_logbits (et.size) << 6;
15946       break;
15947 
15948     case 3:  /* VLD4.  */
15949       {
15950         int align = inst.operands[1].imm >> 8;
15951         align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15952                                          16, 64, 32, 64, 32, 128, -1);
15953         if (align_good == FAIL)
15954           return;
15955         constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15956                     _("bad list length"));
15957         if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15958           inst.instruction |= 1 << 5;
15959         if (et.size == 32 && align == 128)
15960           inst.instruction |= 0x3 << 6;
15961         else
15962           inst.instruction |= neon_logbits (et.size) << 6;
15963       }
15964       break;
15965 
15966     default: ;
15967     }
15968 
15969   inst.instruction |= do_align << 4;
15970 }
15971 
15972 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15973    apart from bits [11:4]).  */
15974 
15975 static void
15976 do_neon_ldx_stx (void)
15977 {
15978   if (inst.operands[1].isreg)
15979     constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15980 
15981   switch (NEON_LANE (inst.operands[0].imm))
15982     {
15983     case NEON_INTERLEAVE_LANES:
15984       NEON_ENCODE (INTERLV, inst);
15985       do_neon_ld_st_interleave ();
15986       break;
15987 
15988     case NEON_ALL_LANES:
15989       NEON_ENCODE (DUP, inst);
15990       do_neon_ld_dup ();
15991       break;
15992 
15993     default:
15994       NEON_ENCODE (LANE, inst);
15995       do_neon_ld_st_lane ();
15996     }
15997 
15998   /* L bit comes from bit mask.  */
15999   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16000   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16001   inst.instruction |= inst.operands[1].reg << 16;
16002 
16003   if (inst.operands[1].postind)
16004     {
16005       int postreg = inst.operands[1].imm & 0xf;
16006       constraint (!inst.operands[1].immisreg,
16007                   _("post-index must be a register"));
16008       constraint (postreg == 0xd || postreg == 0xf,
16009                   _("bad register for post-index"));
16010       inst.instruction |= postreg;
16011     }
16012   else if (inst.operands[1].writeback)
16013     {
16014       inst.instruction |= 0xd;
16015     }
16016   else
16017     inst.instruction |= 0xf;
16018 
16019   if (thumb_mode)
16020     inst.instruction |= 0xf9000000;
16021   else
16022     inst.instruction |= 0xf4000000;
16023 }
16024 
16025 /* FP v8.  */
16026 static void
16027 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16028 {
16029   NEON_ENCODE (FPV8, inst);
16030 
16031   if (rs == NS_FFF)
16032     do_vfp_sp_dyadic ();
16033   else
16034     do_vfp_dp_rd_rn_rm ();
16035 
16036   if (rs == NS_DDD)
16037     inst.instruction |= 0x100;
16038 
16039   inst.instruction |= 0xf0000000;
16040 }
16041 
16042 static void
16043 do_vsel (void)
16044 {
16045   set_it_insn_type (OUTSIDE_IT_INSN);
16046 
16047   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16048     first_error (_("invalid instruction shape"));
16049 }
16050 
16051 static void
16052 do_vmaxnm (void)
16053 {
16054   set_it_insn_type (OUTSIDE_IT_INSN);
16055 
16056   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16057     return;
16058 
16059   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16060     return;
16061 
16062   neon_dyadic_misc (NT_untyped, N_F32, 0);
16063 }
16064 
16065 static void
16066 do_vrint_1 (enum neon_cvt_mode mode)
16067 {
16068   enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
16069   struct neon_type_el et;
16070 
16071   if (rs == NS_NULL)
16072     return;
16073 
16074   et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
16075   if (et.type != NT_invtype)
16076     {
16077       /* VFP encodings.  */
16078       if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
16079 	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
16080 	set_it_insn_type (OUTSIDE_IT_INSN);
16081 
16082       NEON_ENCODE (FPV8, inst);
16083       if (rs == NS_FF)
16084 	do_vfp_sp_monadic ();
16085       else
16086 	do_vfp_dp_rd_rm ();
16087 
16088       switch (mode)
16089 	{
16090 	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
16091 	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
16092 	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
16093 	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
16094 	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
16095 	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
16096 	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
16097 	default: abort ();
16098 	}
16099 
16100       inst.instruction |= (rs == NS_DD) << 8;
16101       do_vfp_cond_or_thumb ();
16102     }
16103   else
16104     {
16105       /* Neon encodings (or something broken...).  */
16106       inst.error = NULL;
16107       et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
16108 
16109       if (et.type == NT_invtype)
16110 	return;
16111 
16112       set_it_insn_type (OUTSIDE_IT_INSN);
16113       NEON_ENCODE (FLOAT, inst);
16114 
16115       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16116 	return;
16117 
16118       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16119       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16120       inst.instruction |= LOW4 (inst.operands[1].reg);
16121       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16122       inst.instruction |= neon_quad (rs) << 6;
16123       switch (mode)
16124 	{
16125 	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
16126 	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
16127 	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
16128 	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
16129 	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
16130 	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
16131 	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
16132 	default: abort ();
16133 	}
16134 
16135       if (thumb_mode)
16136 	inst.instruction |= 0xfc000000;
16137       else
16138 	inst.instruction |= 0xf0000000;
16139     }
16140 }
16141 
16142 static void
16143 do_vrintx (void)
16144 {
16145   do_vrint_1 (neon_cvt_mode_x);
16146 }
16147 
16148 static void
16149 do_vrintz (void)
16150 {
16151   do_vrint_1 (neon_cvt_mode_z);
16152 }
16153 
16154 static void
16155 do_vrintr (void)
16156 {
16157   do_vrint_1 (neon_cvt_mode_r);
16158 }
16159 
16160 static void
16161 do_vrinta (void)
16162 {
16163   do_vrint_1 (neon_cvt_mode_a);
16164 }
16165 
16166 static void
16167 do_vrintn (void)
16168 {
16169   do_vrint_1 (neon_cvt_mode_n);
16170 }
16171 
16172 static void
16173 do_vrintp (void)
16174 {
16175   do_vrint_1 (neon_cvt_mode_p);
16176 }
16177 
16178 static void
16179 do_vrintm (void)
16180 {
16181   do_vrint_1 (neon_cvt_mode_m);
16182 }
16183 
16184 /* Crypto v1 instructions.  */
16185 static void
16186 do_crypto_2op_1 (unsigned elttype, int op)
16187 {
16188   set_it_insn_type (OUTSIDE_IT_INSN);
16189 
16190   if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16191       == NT_invtype)
16192     return;
16193 
16194   inst.error = NULL;
16195 
16196   NEON_ENCODE (INTEGER, inst);
16197   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16198   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16199   inst.instruction |= LOW4 (inst.operands[1].reg);
16200   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16201   if (op != -1)
16202     inst.instruction |= op << 6;
16203 
16204   if (thumb_mode)
16205     inst.instruction |= 0xfc000000;
16206   else
16207     inst.instruction |= 0xf0000000;
16208 }
16209 
16210 static void
16211 do_crypto_3op_1 (int u, int op)
16212 {
16213   set_it_insn_type (OUTSIDE_IT_INSN);
16214 
16215   if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16216 		       N_32 | N_UNT | N_KEY).type == NT_invtype)
16217     return;
16218 
16219   inst.error = NULL;
16220 
16221   NEON_ENCODE (INTEGER, inst);
16222   neon_three_same (1, u, 8 << op);
16223 }
16224 
16225 static void
16226 do_aese (void)
16227 {
16228   do_crypto_2op_1 (N_8, 0);
16229 }
16230 
16231 static void
16232 do_aesd (void)
16233 {
16234   do_crypto_2op_1 (N_8, 1);
16235 }
16236 
16237 static void
16238 do_aesmc (void)
16239 {
16240   do_crypto_2op_1 (N_8, 2);
16241 }
16242 
16243 static void
16244 do_aesimc (void)
16245 {
16246   do_crypto_2op_1 (N_8, 3);
16247 }
16248 
16249 static void
16250 do_sha1c (void)
16251 {
16252   do_crypto_3op_1 (0, 0);
16253 }
16254 
16255 static void
16256 do_sha1p (void)
16257 {
16258   do_crypto_3op_1 (0, 1);
16259 }
16260 
16261 static void
16262 do_sha1m (void)
16263 {
16264   do_crypto_3op_1 (0, 2);
16265 }
16266 
16267 static void
16268 do_sha1su0 (void)
16269 {
16270   do_crypto_3op_1 (0, 3);
16271 }
16272 
16273 static void
16274 do_sha256h (void)
16275 {
16276   do_crypto_3op_1 (1, 0);
16277 }
16278 
16279 static void
16280 do_sha256h2 (void)
16281 {
16282   do_crypto_3op_1 (1, 1);
16283 }
16284 
16285 static void
16286 do_sha256su1 (void)
16287 {
16288   do_crypto_3op_1 (1, 2);
16289 }
16290 
16291 static void
16292 do_sha1h (void)
16293 {
16294   do_crypto_2op_1 (N_32, -1);
16295 }
16296 
16297 static void
16298 do_sha1su1 (void)
16299 {
16300   do_crypto_2op_1 (N_32, 0);
16301 }
16302 
16303 static void
16304 do_sha256su0 (void)
16305 {
16306   do_crypto_2op_1 (N_32, 1);
16307 }
16308 
16309 /* Overall per-instruction processing.	*/
16310 
16311 /* We need to be able to fix up arbitrary expressions in some statements.
16312    This is so that we can handle symbols that are an arbitrary distance from
16313    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16314    which returns part of an address in a form which will be valid for
16315    a data instruction.	We do this by pushing the expression into a symbol
16316    in the expr_section, and creating a fix for that.  */
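/* A minimal sketch of the O_constant + pc_rel case handled below: a
   pc-relative fix against the bare constant 0x1000 is converted into a fix
   against an absolute-section symbol named "*ABS*0x1000" with value 0x1000,
   so the object file has a concrete symbol for the relocation to refer to.  */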
16317 
16318 static void
16319 fix_new_arm (fragS *	   frag,
16320 	     int	   where,
16321 	     short int	   size,
16322 	     expressionS * exp,
16323 	     int	   pc_rel,
16324 	     int	   reloc)
16325 {
16326   fixS *	   new_fix;
16327 
16328   switch (exp->X_op)
16329     {
16330     case O_constant:
16331       if (pc_rel)
16332 	{
16333 	  /* Create an absolute valued symbol, so we have something to
16334              refer to in the object file.  Unfortunately for us, gas's
16335              generic expression parsing will already have folded out
16336              any use of .set foo/.type foo %function that may have
16337              been used to set type information of the target location,
16338              that's being specified symbolically.  We have to presume
16339              the user knows what they are doing.  */
16340 	  char name[16 + 8];
16341 	  symbolS *symbol;
16342 
16343 	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
16344 
16345 	  symbol = symbol_find_or_make (name);
16346 	  S_SET_SEGMENT (symbol, absolute_section);
16347 	  symbol_set_frag (symbol, &zero_address_frag);
16348 	  S_SET_VALUE (symbol, exp->X_add_number);
16349 	  exp->X_op = O_symbol;
16350 	  exp->X_add_symbol = symbol;
16351 	  exp->X_add_number = 0;
16352 	}
16353       /* FALLTHROUGH */
16354     case O_symbol:
16355     case O_add:
16356     case O_subtract:
16357       new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
16358                              (enum bfd_reloc_code_real) reloc);
16359       break;
16360 
16361     default:
16362       new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
16363                                   pc_rel, (enum bfd_reloc_code_real) reloc);
16364       break;
16365     }
16366 
16367   /* Mark whether the fix is to a THUMB instruction, or an ARM
16368      instruction.  */
16369   new_fix->tc_fix_data = thumb_mode;
16370 }
16371 
16372 /* Create a frag for an instruction requiring relaxation.  */
16373 static void
16374 output_relax_insn (void)
16375 {
16376   char * to;
16377   symbolS *sym;
16378   int offset;
16379 
16380   /* The size of the instruction is unknown, so tie the debug info to the
16381      start of the instruction.  */
16382   dwarf2_emit_insn (0);
16383 
16384   switch (inst.reloc.exp.X_op)
16385     {
16386     case O_symbol:
16387       sym = inst.reloc.exp.X_add_symbol;
16388       offset = inst.reloc.exp.X_add_number;
16389       break;
16390     case O_constant:
16391       sym = NULL;
16392       offset = inst.reloc.exp.X_add_number;
16393       break;
16394     default:
16395       sym = make_expr_symbol (&inst.reloc.exp);
16396       offset = 0;
16397       break;
16398   }
16399   to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
16400 		 inst.relax, sym, offset, NULL/*offset, opcode*/);
16401   md_number_to_chars (to, inst.instruction, THUMB_SIZE);
16402 }
16403 
16404 /* Write a 32-bit thumb instruction to buf.  */
16405 static void
16406 put_thumb32_insn (char * buf, unsigned long insn)
16407 {
16408   md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16409   md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16410 }
16411 
16412 static void
16413 output_inst (const char * str)
16414 {
16415   char * to = NULL;
16416 
16417   if (inst.error)
16418     {
16419       as_bad ("%s -- `%s'", inst.error, str);
16420       return;
16421     }
16422   if (inst.relax)
16423     {
16424       output_relax_insn ();
16425       return;
16426     }
16427   if (inst.size == 0)
16428     return;
16429 
16430   to = frag_more (inst.size);
16431   /* PR 9814: Record the thumb mode into the current frag so that we know
16432      what type of NOP padding to use, if necessary.  We override any previous
16433      setting so that if the mode has changed then the NOPS that we use will
16434      match the encoding of the last instruction in the frag.  */
16435   frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
16436 
16437   if (thumb_mode && (inst.size > THUMB_SIZE))
16438     {
16439       gas_assert (inst.size == (2 * THUMB_SIZE));
16440       put_thumb32_insn (to, inst.instruction);
16441     }
16442   else if (inst.size > INSN_SIZE)
16443     {
16444       gas_assert (inst.size == (2 * INSN_SIZE));
16445       md_number_to_chars (to, inst.instruction, INSN_SIZE);
16446       md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
16447     }
16448   else
16449     md_number_to_chars (to, inst.instruction, inst.size);
16450 
16451   if (inst.reloc.type != BFD_RELOC_UNUSED)
16452     fix_new_arm (frag_now, to - frag_now->fr_literal,
16453 		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
16454 		 inst.reloc.type);
16455 
16456   dwarf2_emit_insn (inst.size);
16457 }
16458 
16459 static char *
16460 output_it_inst (int cond, int mask, char * to)
16461 {
16462   unsigned long instruction = 0xbf00;
16463 
16464   mask &= 0xf;
16465   instruction |= mask;
16466   instruction |= cond << 4;
16467 
16468   if (to == NULL)
16469     {
16470       to = frag_more (2);
16471 #ifdef OBJ_ELF
16472       dwarf2_emit_insn (2);
16473 #endif
16474     }
16475 
16476   md_number_to_chars (to, instruction, 2);
16477 
16478   return to;
16479 }
16480 
16481 /* Tag values used in struct asm_opcode's tag field.  */
16482 enum opcode_tag
16483 {
16484   OT_unconditional,	/* Instruction cannot be conditionalized.
16485 			   The ARM condition field is still 0xE.  */
16486   OT_unconditionalF,	/* Instruction cannot be conditionalized
16487 			   and carries 0xF in its ARM condition field.  */
16488   OT_csuffix,		/* Instruction takes a conditional suffix.  */
16489   OT_csuffixF,		/* Some forms of the instruction take a conditional
16490                            suffix, others place 0xF where the condition field
16491                            would be.  */
16492   OT_cinfix3,		/* Instruction takes a conditional infix,
16493 			   beginning at character index 3.  (In
16494 			   unified mode, it becomes a suffix.)  */
16495   OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
16496 			    tsts, cmps, cmns, and teqs. */
16497   OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
16498 			   character index 3, even in unified mode.  Used for
16499 			   legacy instructions where suffix and infix forms
16500 			   may be ambiguous.  */
16501   OT_csuf_or_in3,	/* Instruction takes either a conditional
16502 			   suffix or an infix at character index 3.  */
16503   OT_odd_infix_unc,	/* This is the unconditional variant of an
16504 			   instruction that takes a conditional infix
16505 			   at an unusual position.  In unified mode,
16506 			   this variant will accept a suffix.  */
16507   OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
16508 			   are the conditional variants of instructions that
16509 			   take conditional infixes in unusual positions.
16510 			   The infix appears at character index
16511 			   (tag - OT_odd_infix_0).  These are not accepted
16512 			   in unified mode.  */
16513 };
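/* Illustrative examples (standard mnemonics assumed): a plain conditional
   suffix turns "b" into "beq" (the OT_csuffix pattern), while in the divided
   syntax the s-variants of the arithmetic instructions take the condition as
   an infix after character index 3, so conditional ADDS is written "addeqs"
   (base mnemonic "adds" with "eq" inserted), which is the OT_cinfix3 pattern.
   In unified syntax the same instruction is spelt "addseq".  */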
16514 
16515 /* Subroutine of md_assemble, responsible for looking up the primary
16516    opcode from the mnemonic the user wrote.  STR points to the
16517    beginning of the mnemonic.
16518 
16519    This is not simply a hash table lookup, because of conditional
16520    variants.  Most instructions have conditional variants, which are
16521    expressed with a _conditional affix_ to the mnemonic.  If we were
16522    to encode each conditional variant as a literal string in the opcode
16523    table, it would have approximately 20,000 entries.
16524 
16525    Most mnemonics take this affix as a suffix, and in unified syntax,
16526    'most' is upgraded to 'all'.  However, in the divided syntax, some
16527    instructions take the affix as an infix, notably the s-variants of
16528    the arithmetic instructions.  Of those instructions, all but six
16529    have the infix appear after the third character of the mnemonic.
16530 
16531    Accordingly, the algorithm for looking up primary opcodes given
16532    an identifier is:
16533 
16534    1. Look up the identifier in the opcode table.
16535       If we find a match, go to step U.
16536 
16537    2. Look up the last two characters of the identifier in the
16538       conditions table.  If we find a match, look up the first N-2
16539       characters of the identifier in the opcode table.  If we
16540       find a match, go to step CE.
16541 
16542    3. Look up the fourth and fifth characters of the identifier in
16543       the conditions table.  If we find a match, extract those
16544       characters from the identifier, and look up the remaining
16545       characters in the opcode table.  If we find a match, go
16546       to step CM.
16547 
16548    4. Fail.
16549 
16550    U. Examine the tag field of the opcode structure, in case this is
16551       one of the six instructions with its conditional infix in an
16552       unusual place.  If it is, the tag tells us where to find the
16553       infix; look it up in the conditions table and set inst.cond
16554       accordingly.  Otherwise, this is an unconditional instruction.
16555       Again set inst.cond accordingly.  Return the opcode structure.
16556 
16557   CE. Examine the tag field to make sure this is an instruction that
16558       should receive a conditional suffix.  If it is not, fail.
16559       Otherwise, set inst.cond from the suffix we already looked up,
16560       and return the opcode structure.
16561 
16562   CM. Examine the tag field to make sure this is an instruction that
16563       should receive a conditional infix after the third character.
16564       If it is not, fail.  Otherwise, undo the edits to the current
16565       line of input and proceed as for case CE.  */
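/* A worked example (illustrative): given "addeqs" in the divided syntax,
   step 1 fails ("addeqs" is not in the opcode table), step 2 fails ("qs" is
   not a condition), and step 3 succeeds: characters four and five are "eq",
   and removing them leaves "adds", which is in the opcode table, so we
   continue at step CM.  For the unified-syntax spelling "addseq", step 2
   already succeeds ("eq" is a condition and "adds" is an opcode), so we
   continue at step CE.  */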
16566 
16567 static const struct asm_opcode *
16568 opcode_lookup (char **str)
16569 {
16570   char *end, *base;
16571   char *affix;
16572   const struct asm_opcode *opcode;
16573   const struct asm_cond *cond;
16574   char save[2];
16575 
16576   /* Scan up to the end of the mnemonic, which must end in white space,
16577      '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
16578   for (base = end = *str; *end != '\0'; end++)
16579     if (*end == ' ' || *end == '.')
16580       break;
16581 
16582   if (end == base)
16583     return NULL;
16584 
16585   /* Handle a possible width suffix and/or Neon type suffix.  */
16586   if (end[0] == '.')
16587     {
16588       int offset = 2;
16589 
16590       /* The .w and .n suffixes are only valid if the unified syntax is in
16591          use.  */
16592       if (unified_syntax && end[1] == 'w')
16593 	inst.size_req = 4;
16594       else if (unified_syntax && end[1] == 'n')
16595 	inst.size_req = 2;
16596       else
16597         offset = 0;
16598 
16599       inst.vectype.elems = 0;
16600 
16601       *str = end + offset;
16602 
16603       if (end[offset] == '.')
16604 	{
16605 	  /* See if we have a Neon type suffix (possible in either unified or
16606              non-unified ARM syntax mode).  */
16607           if (parse_neon_type (&inst.vectype, str) == FAIL)
16608 	    return NULL;
16609         }
16610       else if (end[offset] != '\0' && end[offset] != ' ')
16611         return NULL;
16612     }
16613   else
16614     *str = end;
16615 
16616   /* Look for unaffixed or special-case affixed mnemonic.  */
16617   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16618                                                     end - base);
16619   if (opcode)
16620     {
16621       /* step U */
16622       if (opcode->tag < OT_odd_infix_0)
16623 	{
16624 	  inst.cond = COND_ALWAYS;
16625 	  return opcode;
16626 	}
16627 
16628       if (warn_on_deprecated && unified_syntax)
16629 	as_warn (_("conditional infixes are deprecated in unified syntax"));
16630       affix = base + (opcode->tag - OT_odd_infix_0);
16631       cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16632       gas_assert (cond);
16633 
16634       inst.cond = cond->value;
16635       return opcode;
16636     }
16637 
16638   /* Cannot have a conditional suffix on a mnemonic of fewer than three
16639      characters (a one-character base plus the two-character suffix).  */
16640   if (end - base < 3)
16641     return NULL;
16642 
16643   /* Look for suffixed mnemonic.  */
16644   affix = end - 2;
16645   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16646   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16647                                                     affix - base);
16648   if (opcode && cond)
16649     {
16650       /* step CE */
16651       switch (opcode->tag)
16652 	{
16653 	case OT_cinfix3_legacy:
16654 	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
16655 	  break;
16656 
16657 	case OT_cinfix3:
16658 	case OT_cinfix3_deprecated:
16659 	case OT_odd_infix_unc:
16660 	  if (!unified_syntax)
16661 	    return 0;
16662 	  /* else fall through */
16663 
16664 	case OT_csuffix:
16665         case OT_csuffixF:
16666 	case OT_csuf_or_in3:
16667 	  inst.cond = cond->value;
16668 	  return opcode;
16669 
16670 	case OT_unconditional:
16671 	case OT_unconditionalF:
16672 	  if (thumb_mode)
16673 	    inst.cond = cond->value;
16674 	  else
16675 	    {
16676 	      /* Delayed diagnostic.  */
16677 	      inst.error = BAD_COND;
16678 	      inst.cond = COND_ALWAYS;
16679 	    }
16680 	  return opcode;
16681 
16682 	default:
16683 	  return NULL;
16684 	}
16685     }
16686 
16687   /* Cannot have a usual-position infix on a mnemonic of less than
16688      six characters (five would be a suffix).  */
16689   if (end - base < 6)
16690     return NULL;
16691 
16692   /* Look for infixed mnemonic in the usual position.  */
16693   affix = base + 3;
16694   cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16695   if (!cond)
16696     return NULL;
16697 
16698   memcpy (save, affix, 2);
16699   memmove (affix, affix + 2, (end - affix) - 2);
16700   opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16701                                                     (end - base) - 2);
16702   memmove (affix + 2, affix, (end - affix) - 2);
16703   memcpy (affix, save, 2);
16704 
16705   if (opcode
16706       && (opcode->tag == OT_cinfix3
16707 	  || opcode->tag == OT_cinfix3_deprecated
16708 	  || opcode->tag == OT_csuf_or_in3
16709 	  || opcode->tag == OT_cinfix3_legacy))
16710     {
16711       /* Step CM.  */
16712       if (warn_on_deprecated && unified_syntax
16713 	  && (opcode->tag == OT_cinfix3
16714 	      || opcode->tag == OT_cinfix3_deprecated))
16715 	as_warn (_("conditional infixes are deprecated in unified syntax"));
16716 
16717       inst.cond = cond->value;
16718       return opcode;
16719     }
16720 
16721   return NULL;
16722 }
16723 
16724 /* This function generates an initial IT instruction, leaving its block
16725    virtually open for the new instructions. Eventually,
16726    the mask will be updated by now_it_add_mask () each time
16727    a new instruction needs to be included in the IT block.
16728    Finally, the block is closed with close_automatic_it_block ().
16729    The block closure can be requested from md_assemble (), from a
16730    tencode () function, or from a label hook.  */
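/* For example (a sketch, assuming implicit IT generation is enabled for a
   Thumb-2 target): assembling a conditional instruction such as
   "addeq r0, r1" outside any IT block first emits a placeholder "IT EQ"
   through this function; if the following instructions carry a compatible
   condition, now_it_add_mask () widens the mask of that already-emitted IT
   instruction in place rather than emitting a new one.  */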
16731 
16732 static void
16733 new_automatic_it_block (int cond)
16734 {
16735   now_it.state = AUTOMATIC_IT_BLOCK;
16736   now_it.mask = 0x18;
16737   now_it.cc = cond;
16738   now_it.block_length = 1;
16739   mapping_state (MAP_THUMB);
16740   now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16741   now_it.warn_deprecated = FALSE;
16742   now_it.insn_cond = TRUE;
16743 }
16744 
16745 /* Close an automatic IT block.
16746    See comments in new_automatic_it_block ().  */
16747 
16748 static void
16749 close_automatic_it_block (void)
16750 {
16751   now_it.mask = 0x10;
16752   now_it.block_length = 0;
16753 }
16754 
16755 /* Update the mask of the current automatically-generated IT
16756    instruction. See comments in new_automatic_it_block ().  */
16757 
16758 static void
16759 now_it_add_mask (int cond)
16760 {
16761 #define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
16762 #define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
16763                                               | ((bitvalue) << (nbit)))
16764   const int resulting_bit = (cond & 1);
16765 
16766   now_it.mask &= 0xf;
16767   now_it.mask = SET_BIT_VALUE (now_it.mask,
16768                                    resulting_bit,
16769                                   (5 - now_it.block_length));
16770   now_it.mask = SET_BIT_VALUE (now_it.mask,
16771                                    1,
16772                                    ((5 - now_it.block_length) - 1) );
16773   output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16774 
16775 #undef CLEAR_BIT
16776 #undef SET_BIT_VALUE
16777 }
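/* Worked example (illustrative): new_automatic_it_block () starts with mask
   0x18, which output_it_inst () truncates to 0x8, the encoding of a
   one-instruction IT.  When a second instruction with the same condition is
   added, block_length is 2; for an even condition (cond & 1 == 0) the code
   above clears bit 3 and sets bit 2, giving mask 0x4, the ITT encoding when
   the low bit of the first condition is 0.  For an odd condition bit 3 stays
   set, giving 0xC.  */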
16778 
16779 /* The IT block handling machinery is accessed through these functions:
16780      it_fsm_pre_encode ()               from md_assemble ()
16781      set_it_insn_type ()                optional, from the tencode functions
16782      set_it_insn_type_last ()           ditto
16783      in_it_block ()                     ditto
16784      it_fsm_post_encode ()              from md_assemble ()
16785      force_automatic_it_block_close ()  from label handling functions
16786 
16787    Rationale:
16788      1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16789         initializing the IT insn type with a generic initial value depending
16790         on the inst.condition.
16791      2) During the tencode function, two things may happen:
16792         a) The tencode function overrides the IT insn type by
16793            calling either set_it_insn_type (type) or set_it_insn_type_last ().
16794         b) The tencode function queries the IT block state by
16795            calling in_it_block () (i.e. to determine narrow/not narrow mode).
16796 
16797         Both set_it_insn_type and in_it_block run the internal FSM state
16798         handling function (handle_it_state), because: a) setting the IT insn
16799         type may lead to an invalid state (exiting the function),
16800         and b) querying the state requires the FSM to be updated.
16801         Specifically we want to avoid creating an IT block for conditional
16802         branches, so it_fsm_pre_encode is actually a guess and we can't
16803         determine whether an IT block is required until the tencode () routine
16804         has decided what type of instruction this actually is.
16805         Because of this, if set_it_insn_type and in_it_block have to be used,
16806         set_it_insn_type has to be called first.
16807 
16808         set_it_insn_type_last () is a wrapper around set_it_insn_type (type) that
16809         determines the insn IT type depending on the inst.cond code.
16810         When a tencode () routine encodes an instruction that can be
16811         either outside an IT block, or, in the case of being inside, has to be
16812         the last one, set_it_insn_type_last () will determine the proper
16813         IT instruction type based on the inst.cond code. Otherwise,
16814         set_it_insn_type can be called for overriding that logic or
16815         for covering other cases.
16816 
16817         Calling handle_it_state () may not transition the IT block state to
16818         OUTSIDE_IT_BLOCK immediately, since the (current) state could still be
16819         queried. Instead, if the FSM determines that the state should be
16820         transitioned to OUTSIDE_IT_BLOCK, a flag is set so that the block is closed
16821         after the tencode () function: that's what it_fsm_post_encode () does.
16822 
16823         updated state, an error may occur (due to an invalid insn combination).
16824         updated state, an error may occur (due to invalid insns combination).
16825         In that case, inst.error is set.
16826         Therefore, inst.error has to be checked after the execution of
16827         the tencode () routine.
16828 
16829      3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16830         any pending state change (if any) that didn't take place in
16831         handle_it_state () as explained above.  */
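/* As a concrete instance of 2a) above, do_vsel () earlier in this file calls
   set_it_insn_type (OUTSIDE_IT_INSN) before encoding, so a VSEL appearing
   inside a hand-written IT block is rejected with BAD_NOT_IT, and
   it_fsm_post_encode () later commits whatever state change that implies.  */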
16832 
16833 static void
16834 it_fsm_pre_encode (void)
16835 {
16836   if (inst.cond != COND_ALWAYS)
16837     inst.it_insn_type = INSIDE_IT_INSN;
16838   else
16839     inst.it_insn_type = OUTSIDE_IT_INSN;
16840 
16841   now_it.state_handled = 0;
16842 }
16843 
16844 /* IT state FSM handling function.  */
16845 
16846 static int
16847 handle_it_state (void)
16848 {
16849   now_it.state_handled = 1;
16850   now_it.insn_cond = FALSE;
16851 
16852   switch (now_it.state)
16853     {
16854     case OUTSIDE_IT_BLOCK:
16855       switch (inst.it_insn_type)
16856 	{
16857 	case OUTSIDE_IT_INSN:
16858 	  break;
16859 
16860 	case INSIDE_IT_INSN:
16861 	case INSIDE_IT_LAST_INSN:
16862 	  if (thumb_mode == 0)
16863 	    {
16864 	      if (unified_syntax
16865 		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16866 		as_tsktsk (_("Warning: conditional outside an IT block"\
16867 			     " for Thumb."));
16868 	    }
16869 	  else
16870 	    {
16871 	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16872 		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16873 		{
16874 		  /* Automatically generate the IT instruction.  */
16875 		  new_automatic_it_block (inst.cond);
16876 		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16877 		    close_automatic_it_block ();
16878 		}
16879 	      else
16880 		{
16881 		  inst.error = BAD_OUT_IT;
16882 		  return FAIL;
16883 		}
16884 	    }
16885 	  break;
16886 
16887 	case IF_INSIDE_IT_LAST_INSN:
16888 	case NEUTRAL_IT_INSN:
16889 	  break;
16890 
16891 	case IT_INSN:
16892 	  now_it.state = MANUAL_IT_BLOCK;
16893 	  now_it.block_length = 0;
16894 	  break;
16895 	}
16896       break;
16897 
16898     case AUTOMATIC_IT_BLOCK:
16899       /* Three things may happen now:
16900 	 a) We should increment the current IT block size;
16901 	 b) We should close the current IT block (closing insn, or 4 insns reached);
16902 	 c) We should close the current IT block and start a new one (due
16903 	 to incompatible conditions, or because a
16904 	 4-insn block has already been reached).  */
16905 
16906       switch (inst.it_insn_type)
16907 	{
16908 	case OUTSIDE_IT_INSN:
16909 	  /* The closure of the block shall happen immediately,
16910 	     so any in_it_block () call reports the block as closed.  */
16911 	  force_automatic_it_block_close ();
16912 	  break;
16913 
16914 	case INSIDE_IT_INSN:
16915 	case INSIDE_IT_LAST_INSN:
16916 	case IF_INSIDE_IT_LAST_INSN:
16917 	  now_it.block_length++;
16918 
16919 	  if (now_it.block_length > 4
16920 	      || !now_it_compatible (inst.cond))
16921 	    {
16922 	      force_automatic_it_block_close ();
16923 	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16924 		new_automatic_it_block (inst.cond);
16925 	    }
16926 	  else
16927 	    {
16928 	      now_it.insn_cond = TRUE;
16929 	      now_it_add_mask (inst.cond);
16930 	    }
16931 
16932 	  if (now_it.state == AUTOMATIC_IT_BLOCK
16933 	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16934 		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16935 	    close_automatic_it_block ();
16936 	  break;
16937 
16938 	case NEUTRAL_IT_INSN:
16939 	  now_it.block_length++;
16940 	  now_it.insn_cond = TRUE;
16941 
16942 	  if (now_it.block_length > 4)
16943 	    force_automatic_it_block_close ();
16944 	  else
16945 	    now_it_add_mask (now_it.cc & 1);
16946 	  break;
16947 
16948 	case IT_INSN:
16949 	  close_automatic_it_block ();
16950 	  now_it.state = MANUAL_IT_BLOCK;
16951 	  break;
16952 	}
16953       break;
16954 
16955     case MANUAL_IT_BLOCK:
16956       {
16957 	/* Check conditional suffixes.  */
16958 	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16959 	int is_last;
16960 	now_it.mask <<= 1;
16961 	now_it.mask &= 0x1f;
16962 	is_last = (now_it.mask == 0x10);
16963 	now_it.insn_cond = TRUE;
16964 
16965 	switch (inst.it_insn_type)
16966 	  {
16967 	  case OUTSIDE_IT_INSN:
16968 	    inst.error = BAD_NOT_IT;
16969 	    return FAIL;
16970 
16971 	  case INSIDE_IT_INSN:
16972 	    if (cond != inst.cond)
16973 	      {
16974 		inst.error = BAD_IT_COND;
16975 		return FAIL;
16976 	      }
16977 	    break;
16978 
16979 	  case INSIDE_IT_LAST_INSN:
16980 	  case IF_INSIDE_IT_LAST_INSN:
16981 	    if (cond != inst.cond)
16982 	      {
16983 		inst.error = BAD_IT_COND;
16984 		return FAIL;
16985 	      }
16986 	    if (!is_last)
16987 	      {
16988 		inst.error = BAD_BRANCH;
16989 		return FAIL;
16990 	      }
16991 	    break;
16992 
16993 	  case NEUTRAL_IT_INSN:
16994 	    /* The BKPT instruction is unconditional even in an IT block.  */
16995 	    break;
16996 
16997 	  case IT_INSN:
16998 	    inst.error = BAD_IT_IT;
16999 	    return FAIL;
17000 	  }
17001       }
17002       break;
17003     }
17004 
17005   return SUCCESS;
17006 }
17007 
17008 struct depr_insn_mask
17009 {
17010   unsigned long pattern;
17011   unsigned long mask;
17012   const char* description;
17013 };
17014 
17015 /* List of 16-bit instruction patterns deprecated in an IT block in
17016    ARMv8.  */
17017 static const struct depr_insn_mask depr_it_insns[] = {
17018   { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17019   { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17020   { 0xa000, 0xb800, N_("ADR") },
17021   { 0x4800, 0xf800, N_("Literal loads") },
17022   { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17023   { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17024   { 0, 0, NULL }
17025 };
17026 
17027 static void
17028 it_fsm_post_encode (void)
17029 {
17030   int is_last;
17031 
17032   if (!now_it.state_handled)
17033     handle_it_state ();
17034 
17035   if (now_it.insn_cond
17036       && !now_it.warn_deprecated
17037       && warn_on_deprecated
17038       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17039     {
17040       if (inst.instruction >= 0x10000)
17041 	{
17042 	  as_warn (_("it blocks containing wide Thumb instructions are "
17043 		     "deprecated in ARMv8"));
17044 	  now_it.warn_deprecated = TRUE;
17045 	}
17046       else
17047 	{
17048 	  const struct depr_insn_mask *p = depr_it_insns;
17049 
17050 	  while (p->mask != 0)
17051 	    {
17052 	      if ((inst.instruction & p->mask) == p->pattern)
17053 		{
17054 		  as_warn (_("it blocks containing 16-bit Thumb instructions "
17055 			     "of the following class are deprecated in ARMv8: "
17056 			     "%s"), p->description);
17057 		  now_it.warn_deprecated = TRUE;
17058 		  break;
17059 		}
17060 
17061 	      ++p;
17062 	    }
17063 	}
17064 
17065       if (now_it.block_length > 1)
17066 	{
17067 	  as_warn (_("it blocks of more than one conditional instruction are "
17068 		     "deprecated in ARMv8"));
17069 	  now_it.warn_deprecated = TRUE;
17070 	}
17071     }
17072 
17073   is_last = (now_it.mask == 0x10);
17074   if (is_last)
17075     {
17076       now_it.state = OUTSIDE_IT_BLOCK;
17077       now_it.mask = 0;
17078     }
17079 }
17080 
17081 static void
17082 force_automatic_it_block_close (void)
17083 {
17084   if (now_it.state == AUTOMATIC_IT_BLOCK)
17085     {
17086       close_automatic_it_block ();
17087       now_it.state = OUTSIDE_IT_BLOCK;
17088       now_it.mask = 0;
17089     }
17090 }
17091 
17092 static int
17093 in_it_block (void)
17094 {
17095   if (!now_it.state_handled)
17096     handle_it_state ();
17097 
17098   return now_it.state != OUTSIDE_IT_BLOCK;
17099 }
17100 
17101 void
17102 md_assemble (char *str)
17103 {
17104   char *p = str;
17105   const struct asm_opcode * opcode;
17106 
17107   /* Align the previous label if needed.  */
17108   if (last_label_seen != NULL)
17109     {
17110       symbol_set_frag (last_label_seen, frag_now);
17111       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17112       S_SET_SEGMENT (last_label_seen, now_seg);
17113     }
17114 
17115   memset (&inst, '\0', sizeof (inst));
17116   inst.reloc.type = BFD_RELOC_UNUSED;
17117 
17118   opcode = opcode_lookup (&p);
17119   if (!opcode)
17120     {
17121       /* It wasn't an instruction, but it might be a register alias of
17122 	 the form alias .req reg, or a Neon .dn/.qn directive.  */
17123       if (! create_register_alias (str, p)
17124           && ! create_neon_reg_alias (str, p))
17125 	as_bad (_("bad instruction `%s'"), str);
17126 
17127       return;
17128     }
17129 
17130   if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17131     as_warn (_("s suffix on comparison instruction is deprecated"));
17132 
17133   /* The value which unconditional instructions should have in place of the
17134      condition field.  */
17135   inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
17136 
17137   if (thumb_mode)
17138     {
17139       arm_feature_set variant;
17140 
17141       variant = cpu_variant;
17142       /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
17143       if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17144 	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17145       /* Check that this instruction is supported for this CPU.  */
17146       if (!opcode->tvariant
17147 	  || (thumb_mode == 1
17148 	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17149 	{
17150 	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
17151 	  return;
17152 	}
17153       if (inst.cond != COND_ALWAYS && !unified_syntax
17154 	  && opcode->tencode != do_t_branch)
17155 	{
17156 	  as_bad (_("Thumb does not support conditional execution"));
17157 	  return;
17158 	}
17159 
17160       if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17161 	{
17162 	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17163 	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17164 		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17165 	    {
17166 	      /* Two things are addressed here.
17167 		 1) Implicitly require narrow instructions on Thumb-1.
17168 		    This avoids relaxation accidentally introducing Thumb-2
17169 		     instructions.
17170 		 2) Reject wide instructions in non-Thumb-2 cores.  */
17171 	      if (inst.size_req == 0)
17172 		inst.size_req = 2;
17173 	      else if (inst.size_req == 4)
17174 		{
17175 		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17176 		  return;
17177 		}
17178 	    }
17179 	}
17180 
17181       inst.instruction = opcode->tvalue;
17182 
17183       if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17184         {
17185           /* Prepare the it_insn_type for those encodings that don't set
17186              it.  */
17187           it_fsm_pre_encode ();
17188 
17189           opcode->tencode ();
17190 
17191           it_fsm_post_encode ();
17192         }
17193 
17194       if (!(inst.error || inst.relax))
17195 	{
17196 	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17197 	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
17198 	  if (inst.size_req && inst.size_req != inst.size)
17199 	    {
17200 	      as_bad (_("cannot honor width suffix -- `%s'"), str);
17201 	      return;
17202 	    }
17203 	}
17204 
17205       /* Something has gone badly wrong if we try to relax a fixed size
17206          instruction.  */
17207       gas_assert (inst.size_req == 0 || !inst.relax);
17208 
17209       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17210 			      *opcode->tvariant);
17211       /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17212 	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
17213 	 anything other than bl/blx and v6-M instructions.
17214 	 This is overly pessimistic for relaxable instructions.  */
17215       if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17216 	   || inst.relax)
17217 	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17218 	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17219 	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17220 				arm_ext_v6t2);
17221 
17222       check_neon_suffixes;
17223 
17224       if (!inst.error)
17225 	{
17226 	  mapping_state (MAP_THUMB);
17227 	}
17228     }
17229   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17230     {
17231       bfd_boolean is_bx;
17232 
17233       /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
17234       is_bx = (opcode->aencode == do_bx);
17235 
17236       /* Check that this instruction is supported for this CPU.  */
17237       if (!(is_bx && fix_v4bx)
17238 	  && !(opcode->avariant &&
17239 	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17240 	{
17241 	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
17242 	  return;
17243 	}
17244       if (inst.size_req)
17245 	{
17246 	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17247 	  return;
17248 	}
17249 
17250       inst.instruction = opcode->avalue;
17251       if (opcode->tag == OT_unconditionalF)
17252 	inst.instruction |= 0xF << 28;
17253       else
17254 	inst.instruction |= inst.cond << 28;
17255       inst.size = INSN_SIZE;
17256       if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17257         {
17258           it_fsm_pre_encode ();
17259           opcode->aencode ();
17260           it_fsm_post_encode ();
17261         }
17262       /* Arm mode bx is marked as both v4T and v5 because it's still required
17263          on a hypothetical non-thumb v5 core.  */
17264       if (is_bx)
17265 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17266       else
17267 	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17268 				*opcode->avariant);
17269 
17270       check_neon_suffixes;
17271 
17272       if (!inst.error)
17273 	{
17274 	  mapping_state (MAP_ARM);
17275 	}
17276     }
17277   else
17278     {
17279       as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17280 		"-- `%s'"), str);
17281       return;
17282     }
17283   output_inst (str);
17284 }
17285 
17286 static void
17287 check_it_blocks_finished (void)
17288 {
17289 #ifdef OBJ_ELF
17290   asection *sect;
17291 
17292   for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17293     {
17294       segment_info_type *seginfo = seg_info (sect);
17295 
17296       if (seginfo && seginfo->tc_segment_info_data.current_it.state
17297 	  == MANUAL_IT_BLOCK)
17298         {
17299 	  as_warn (_("section '%s' finished with an open IT block."),
17300 		   sect->name);
17301         }
17302     }
17303 #else
17304   if (now_it.state == MANUAL_IT_BLOCK)
17305     as_warn (_("file finished with an open IT block."));
17306 #endif
17307 }
17308 
17309 /* Various frobbings of labels and their addresses.  */
17310 
17311 void
17312 arm_start_line_hook (void)
17313 {
17314   last_label_seen = NULL;
17315 }
17316 
17317 void
17318 arm_frob_label (symbolS * sym)
17319 {
17320   last_label_seen = sym;
17321 
17322   ARM_SET_THUMB (sym, thumb_mode);
17323 
17324 #if defined OBJ_COFF || defined OBJ_ELF
17325   ARM_SET_INTERWORK (sym, support_interwork);
17326 #endif
17327 
17328   force_automatic_it_block_close ();
17329 
17330   /* Note - do not allow local symbols (.Lxxx) to be labelled
17331      as Thumb functions.  This is because these labels, whilst
17332      they exist inside Thumb code, are not the entry points for
17333      possible ARM->Thumb calls.	 Also, these labels can be used
17334      as part of a computed goto or switch statement.  For example, gcc
17335      can generate code that looks like this:
17336 
17337 		ldr  r2, [pc, .Laaa]
17338 		lsl  r3, r3, #2
17339 		ldr  r2, [r3, r2]
17340 		mov  pc, r2
17341 
17342        .Lbbb:  .word .Lxxx
17343        .Lccc:  .word .Lyyy
17344        ..etc...
17345        .Laaa:	.word .Lbbb
17346 
17347      The first instruction loads the address of the jump table.
17348      The second instruction converts a table index into a byte offset.
17349      The third instruction gets the jump address out of the table.
17350      The fourth instruction performs the jump.
17351 
17352      If the address stored at .Laaa is that of a symbol which has the
17353      Thumb_Func bit set, then the linker will arrange for this address
17354      to have the bottom bit set, which in turn would mean that the
17355      address computation performed by the third instruction would end
17356      up with the bottom bit set.  Since the ARM is capable of unaligned
17357      word loads, the instruction would then load the incorrect address
17358      out of the jump table, and chaos would ensue.  */
17359   if (label_is_thumb_function_name
17360       && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
17361       && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
17362     {
17363       /* When the address of a Thumb function is taken the bottom
17364 	 bit of that address should be set.  This will allow
17365 	 interworking between Arm and Thumb functions to work
17366 	 correctly.  */
17367 
17368       THUMB_SET_FUNC (sym, 1);
17369 
17370       label_is_thumb_function_name = FALSE;
17371     }
17372 
17373   dwarf2_emit_label (sym);
17374 }
17375 
17376 bfd_boolean
17377 arm_data_in_code (void)
17378 {
17379   if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17380     {
17381       *input_line_pointer = '/';
17382       input_line_pointer += 5;
17383       *input_line_pointer = 0;
17384       return TRUE;
17385     }
17386 
17387   return FALSE;
17388 }
17389 
17390 char *
17391 arm_canonicalize_symbol_name (char * name)
17392 {
17393   int len;
17394 
17395   if (thumb_mode && (len = strlen (name)) > 5
17396       && streq (name + len - 5, "/data"))
17397     *(name + len - 5) = 0;
17398 
17399   return name;
17400 }
17401 
17402 /* Table of all register names defined by default.  The user can
17403    define additional names with .req.  Note that all register names
17404    should appear in both upper and lowercase variants.	Some registers
17405    also have mixed-case names.	*/
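/* For example (illustrative), a user can add an alias of their own with
   "myptr .req r4"; create_register_alias () handles that directive, and the
   alias is then recognized alongside the entries below.  */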
17406 
17407 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
17408 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
17409 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
17410 #define REGSET(p,t) \
17411   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17412   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17413   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17414   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17415 #define REGSETH(p,t) \
17416   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17417   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17418   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17419   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17420 #define REGSET2(p,t) \
17421   REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17422   REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17423   REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17424   REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
17425 #define SPLRBANK(base,bank,t) \
17426   REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17427   REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17428   REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17429   REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17430   REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17431   REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17432 
17433 static const struct reg_entry reg_names[] =
17434 {
17435   /* ARM integer registers.  */
17436   REGSET(r, RN), REGSET(R, RN),
17437 
17438   /* ATPCS synonyms.  */
17439   REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17440   REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17441   REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17442 
17443   REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17444   REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17445   REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17446 
17447   /* Well-known aliases.  */
17448   REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17449   REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17450 
17451   REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17452   REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17453 
17454   /* Coprocessor numbers.  */
17455   REGSET(p, CP), REGSET(P, CP),
17456 
17457   /* Coprocessor register numbers.  The "cr" variants are for backward
17458      compatibility.  */
17459   REGSET(c,  CN), REGSET(C, CN),
17460   REGSET(cr, CN), REGSET(CR, CN),
17461 
17462   /* ARM banked registers.  */
17463   REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17464   REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17465   REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17466   REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17467   REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17468   REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17469   REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17470 
17471   REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17472   REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17473   REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17474   REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17475   REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17476   REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
17477   REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17478   REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
17479 
17480   SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17481   SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17482   SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17483   SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17484   SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17485   REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17486   REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17487   REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17488   REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17489 
17490   /* FPA registers.  */
17491   REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17492   REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17493 
17494   REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17495   REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17496 
17497   /* VFP SP registers.	*/
17498   REGSET(s,VFS),  REGSET(S,VFS),
17499   REGSETH(s,VFS), REGSETH(S,VFS),
17500 
17501   /* VFP DP Registers.	*/
17502   REGSET(d,VFD),  REGSET(D,VFD),
17503   /* Extra Neon DP registers.  */
17504   REGSETH(d,VFD), REGSETH(D,VFD),
17505 
17506   /* Neon QP registers.  */
17507   REGSET2(q,NQ),  REGSET2(Q,NQ),
17508 
17509   /* VFP control registers.  */
17510   REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17511   REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17512   REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17513   REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17514   REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17515   REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17516 
17517   /* Maverick DSP coprocessor registers.  */
17518   REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
17519   REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
17520 
17521   REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17522   REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17523   REGDEF(dspsc,0,DSPSC),
17524 
17525   REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17526   REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17527   REGDEF(DSPSC,0,DSPSC),
17528 
17529   /* iWMMXt data registers - p0, c0-15.	 */
17530   REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17531 
17532   /* iWMMXt control registers - p1, c0-3.  */
17533   REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
17534   REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
17535   REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
17536   REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
17537 
17538   /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
17539   REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
17540   REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
17541   REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
17542   REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
17543 
17544   /* XScale accumulator registers.  */
17545   REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17546 };
17547 #undef REGDEF
17548 #undef REGNUM
17549 #undef REGSET
17550 
17551 /* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
17552    within psr_required_here.  */
17553 static const struct asm_psr psrs[] =
17554 {
17555   /* Backward compatibility notation.  Note that "all" is no longer
17556      truly all possible PSR bits.  */
17557   {"all",  PSR_c | PSR_f},
17558   {"flg",  PSR_f},
17559   {"ctl",  PSR_c},
17560 
17561   /* Individual flags.	*/
17562   {"f",	   PSR_f},
17563   {"c",	   PSR_c},
17564   {"x",	   PSR_x},
17565   {"s",	   PSR_s},
17566 
17567   /* Combinations of flags.  */
17568   {"fs",   PSR_f | PSR_s},
17569   {"fx",   PSR_f | PSR_x},
17570   {"fc",   PSR_f | PSR_c},
17571   {"sf",   PSR_s | PSR_f},
17572   {"sx",   PSR_s | PSR_x},
17573   {"sc",   PSR_s | PSR_c},
17574   {"xf",   PSR_x | PSR_f},
17575   {"xs",   PSR_x | PSR_s},
17576   {"xc",   PSR_x | PSR_c},
17577   {"cf",   PSR_c | PSR_f},
17578   {"cs",   PSR_c | PSR_s},
17579   {"cx",   PSR_c | PSR_x},
17580   {"fsx",  PSR_f | PSR_s | PSR_x},
17581   {"fsc",  PSR_f | PSR_s | PSR_c},
17582   {"fxs",  PSR_f | PSR_x | PSR_s},
17583   {"fxc",  PSR_f | PSR_x | PSR_c},
17584   {"fcs",  PSR_f | PSR_c | PSR_s},
17585   {"fcx",  PSR_f | PSR_c | PSR_x},
17586   {"sfx",  PSR_s | PSR_f | PSR_x},
17587   {"sfc",  PSR_s | PSR_f | PSR_c},
17588   {"sxf",  PSR_s | PSR_x | PSR_f},
17589   {"sxc",  PSR_s | PSR_x | PSR_c},
17590   {"scf",  PSR_s | PSR_c | PSR_f},
17591   {"scx",  PSR_s | PSR_c | PSR_x},
17592   {"xfs",  PSR_x | PSR_f | PSR_s},
17593   {"xfc",  PSR_x | PSR_f | PSR_c},
17594   {"xsf",  PSR_x | PSR_s | PSR_f},
17595   {"xsc",  PSR_x | PSR_s | PSR_c},
17596   {"xcf",  PSR_x | PSR_c | PSR_f},
17597   {"xcs",  PSR_x | PSR_c | PSR_s},
17598   {"cfs",  PSR_c | PSR_f | PSR_s},
17599   {"cfx",  PSR_c | PSR_f | PSR_x},
17600   {"csf",  PSR_c | PSR_s | PSR_f},
17601   {"csx",  PSR_c | PSR_s | PSR_x},
17602   {"cxf",  PSR_c | PSR_x | PSR_f},
17603   {"cxs",  PSR_c | PSR_x | PSR_s},
17604   {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
17605   {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
17606   {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
17607   {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
17608   {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
17609   {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
17610   {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
17611   {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
17612   {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
17613   {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
17614   {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
17615   {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
17616   {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
17617   {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
17618   {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
17619   {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
17620   {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
17621   {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
17622   {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
17623   {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
17624   {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
17625   {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
17626   {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
17627   {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
17628 };
17629 
17630 /* Table of V7M psr names.  */
17631 static const struct asm_psr v7m_psrs[] =
17632 {
17633   {"apsr",	  0 }, {"APSR",		0 },
17634   {"iapsr",	  1 }, {"IAPSR",	1 },
17635   {"eapsr",	  2 }, {"EAPSR",	2 },
17636   {"psr",	  3 }, {"PSR",		3 },
17637   {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
17638   {"ipsr",	  5 }, {"IPSR",		5 },
17639   {"epsr",	  6 }, {"EPSR",		6 },
17640   {"iepsr",	  7 }, {"IEPSR",	7 },
17641   {"msp",	  8 }, {"MSP",		8 },
17642   {"psp",	  9 }, {"PSP",		9 },
17643   {"primask",	  16}, {"PRIMASK",	16},
17644   {"basepri",	  17}, {"BASEPRI",	17},
17645   {"basepri_max", 18}, {"BASEPRI_MAX",	18},
17646   {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
17647   {"faultmask",	  19}, {"FAULTMASK",	19},
17648   {"control",	  20}, {"CONTROL",	20}
17649 };
17650 
17651 /* Table of all shift-in-operand names.	 */
17652 static const struct asm_shift_name shift_names [] =
17653 {
17654   { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
17655   { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
17656   { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
17657   { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
17658   { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
17659   { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
17660 };
17661 
17662 /* Table of all explicit relocation names.  */
17663 #ifdef OBJ_ELF
17664 static struct reloc_entry reloc_names[] =
17665 {
17666   { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
17667   { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
17668   { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
17669   { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
17670   { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
17671   { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
17672   { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
17673   { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
17674   { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
17675   { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
17676   { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
17677   { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
17678   { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
17679   	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
17680   { "tlscall", BFD_RELOC_ARM_TLS_CALL},
17681   	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
17682   { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
17683   	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
17684 };
17685 #endif
17686 
17687 /* Table of all conditional affixes.  0xF is not defined as a condition code.  */
17688 static const struct asm_cond conds[] =
17689 {
17690   {"eq", 0x0},
17691   {"ne", 0x1},
17692   {"cs", 0x2}, {"hs", 0x2},
17693   {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
17694   {"mi", 0x4},
17695   {"pl", 0x5},
17696   {"vs", 0x6},
17697   {"vc", 0x7},
17698   {"hi", 0x8},
17699   {"ls", 0x9},
17700   {"ge", 0xa},
17701   {"lt", 0xb},
17702   {"gt", 0xc},
17703   {"le", 0xd},
17704   {"al", 0xe}
17705 };
17706 
17707 #define UL_BARRIER(L,U,CODE,FEAT) \
17708   { L, CODE, ARM_FEATURE (FEAT, 0) }, \
17709   { U, CODE, ARM_FEATURE (FEAT, 0) }
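/* Worked example: UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER) below
   yields the two entries
     { "sy", 0xf, ARM_FEATURE (ARM_EXT_BARRIER, 0) },
     { "SY", 0xf, ARM_FEATURE (ARM_EXT_BARRIER, 0) }
   so each barrier option is accepted in both lower and upper case.  */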
17710 
17711 static struct asm_barrier_opt barrier_opt_names[] =
17712 {
17713   UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
17714   UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
17715   UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
17716   UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
17717   UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
17718   UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
17719   UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
17720   UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
17721   UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
17722   UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
17723   UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
17724   UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
17725   UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
17726   UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
17727   UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
17728   UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
17729 };
17730 
17731 #undef UL_BARRIER
17732 
17733 /* Table of ARM-format instructions.	*/
17734 
17735 /* Macros for gluing together operand strings.  N.B. In all cases
17736    other than OPS0, the trailing OP_stop comes from default
17737    zero-initialization of the unspecified elements of the array.  */
17738 #define OPS0()		  { OP_stop, }
17739 #define OPS1(a)		  { OP_##a, }
17740 #define OPS2(a,b)	  { OP_##a,OP_##b, }
17741 #define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
17742 #define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
17743 #define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
17744 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
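/* Worked example: with the definitions above, OPS3 (RR, oRR, SH)
   expands to { OP_RR, OP_oRR, OP_SH, }; the unwritten elements of the
   operand array default to zero, which supplies the trailing OP_stop
   mentioned in the comment above.  */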
17745 
17746 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
17747    This is useful when mixing operands for ARM and THUMB, i.e. using the
17748    MIX_ARM_THUMB_OPERANDS macro.
17749    In order to use these macros, prefix the number of operands with _
17750    e.g. _3.  */
17751 #define OPS_1(a)	   { a, }
17752 #define OPS_2(a,b)	   { a,b, }
17753 #define OPS_3(a,b,c)	   { a,b,c, }
17754 #define OPS_4(a,b,c,d)	   { a,b,c,d, }
17755 #define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
17756 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
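/* Worked example: OPS_2 (OP_RR, OP_ADDRGLDR) expands to
   { OP_RR, OP_ADDRGLDR, }; the caller writes the full OP_ names, as in
   the "str" entry further down, which uses _2 together with
   MIX_ARM_THUMB_OPERANDS.  */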
17757 
17758 /* These macros abstract out the exact format of the mnemonic table and
17759    save some repeated characters.  */
17760 
17761 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
17762 #define TxCE(mnem, op, top, nops, ops, ae, te) \
17763   { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
17764     THUMB_VARIANT, do_##ae, do_##te }
17765 
17766 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
17767    a T_MNEM_xyz enumerator.  */
17768 #define TCE(mnem, aop, top, nops, ops, ae, te) \
17769       TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
17770 #define tCE(mnem, aop, top, nops, ops, ae, te) \
17771       TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
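/* Worked example (illustrative, not part of the original comments):
   given the definitions above,
     TCE ("swi", f000000, df00, 1, (EXPi), swi, t_swi)
   from the table below expands to
     { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00,
       ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi },
   while the tCE form substitutes a T_MNEM_xyz enumerator, rather than
   a fixed encoding, for the Thumb opcode field.  */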
17772 
17773 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
17774    infix after the third character.  */
17775 #define TxC3(mnem, op, top, nops, ops, ae, te) \
17776   { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
17777     THUMB_VARIANT, do_##ae, do_##te }
17778 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
17779   { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
17780     THUMB_VARIANT, do_##ae, do_##te }
17781 #define TC3(mnem, aop, top, nops, ops, ae, te) \
17782       TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
17783 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
17784       TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
17785 #define tC3(mnem, aop, top, nops, ops, ae, te) \
17786       TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17787 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
17788       TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
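/* Worked example: TC3 ("stmdb", 9000000, e9000000, 2, (RRw, REGLST),
   ldmstm, t_ldmstm) from the table below expands to
     { "stmdb", { OP_RRw, OP_REGLST, }, OT_cinfix3, 0x9000000,
       0xe9000000, ARM_VARIANT, THUMB_VARIANT, do_ldmstm, do_t_ldmstm },
   i.e. the same shape as a TCE entry but tagged OT_cinfix3, so the
   condition is parsed as an infix after the third character
   (e.g. "stmeqdb").  */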
17789 
17790 /* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
17791    appear in the condition table.  */
17792 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)	\
17793   { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17794     0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
17795 
17796 #define TxCM(m1, m2, op, top, nops, ops, ae, te)	\
17797   TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),	\
17798   TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),	\
17799   TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),	\
17800   TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),	\
17801   TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),	\
17802   TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),	\
17803   TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),	\
17804   TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),	\
17805   TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),	\
17806   TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),	\
17807   TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),	\
17808   TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),	\
17809   TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),	\
17810   TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),	\
17811   TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),	\
17812   TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),	\
17813   TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),	\
17814   TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),	\
17815   TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
17816 
17817 #define TCM(m1,m2, aop, top, nops, ops, ae, te)		\
17818       TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
17819 #define tCM(m1,m2, aop, top, nops, ops, ae, te)		\
17820       TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
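/* Worked example: each TxCM use emits nineteen entries, one per line
   above: the bare mnemonic (tagged OT_odd_infix_unc) plus one per
   condition.  For instance, tCM ("ld","sh", ...) in the table below
   yields "ldsh", "ldeqsh", "ldnesh", ..., "ldalsh", with the infix
   position encoded as OT_odd_infix_0 + sizeof (m1) - 1.  */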
17821 
17822 /* Mnemonic that cannot be conditionalized.  The ARM condition-code
17823    field is still 0xE.  Many of the Thumb variants can be executed
17824    conditionally, so this is checked separately.  */
17825 #define TUE(mnem, op, top, nops, ops, ae, te)				\
17826   { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17827     THUMB_VARIANT, do_##ae, do_##te }
17828 
17829 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17830    condition code field.  */
17831 #define TUF(mnem, op, top, nops, ops, ae, te)				\
17832   { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17833     THUMB_VARIANT, do_##ae, do_##te }
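/* Worked example: TUF ("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld)
   below expands to
     { "pld", { OP_ADDR, }, OT_unconditionalF, 0x450f000, 0xf810f000,
       ARM_VARIANT, THUMB_VARIANT, do_pld, do_t_pld };
   TUE differs only in being tagged OT_unconditional, for mnemonics
   that still encode 0xE in the ARM condition field.  */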
17834 
17835 /* ARM-only variants of all the above.  */
17836 #define CE(mnem,  op, nops, ops, ae)	\
17837   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17838 
17839 #define C3(mnem, op, nops, ops, ae)	\
17840   { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17841 
17842 /* Legacy mnemonics that always have conditional infix after the third
17843    character.  */
17844 #define CL(mnem, op, nops, ops, ae)	\
17845   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17846     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17847 
17848 /* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
17849 #define cCE(mnem,  op, nops, ops, ae)	\
17850   { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
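/* Worked example: cCE ("wfs", e200110, 1, (RR), rd) from the FPA block
   below expands to
     { "wfs", { OP_RR, }, OT_csuffix, 0xe200110, 0xee200110,
       ARM_VARIANT, ARM_VARIANT, do_rd, do_rd },
   i.e. the Thumb-2 opcode is, in effect, the ARM one with the
   always-true (0xE) condition in the top nibble, hence "isomorphic"
   above.  */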
17851 
17852 /* Legacy coprocessor instructions where conditional infix and conditional
17853    suffix are ambiguous.  For consistency this includes all FPA instructions,
17854    not just the potentially ambiguous ones.  */
17855 #define cCL(mnem, op, nops, ops, ae)	\
17856   { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17857     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17858 
17859 /* Coprocessor, takes either a suffix or a position-3 infix
17860    (for an FPA corner case). */
17861 #define C3E(mnem, op, nops, ops, ae) \
17862   { mnem, OPS##nops ops, OT_csuf_or_in3, \
17863     0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17864 
17865 #define xCM_(m1, m2, m3, op, nops, ops, ae)	\
17866   { m1 #m2 m3, OPS##nops ops, \
17867     sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17868     0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17869 
17870 #define CM(m1, m2, op, nops, ops, ae)	\
17871   xCM_ (m1,   , m2, op, nops, ops, ae),	\
17872   xCM_ (m1, eq, m2, op, nops, ops, ae),	\
17873   xCM_ (m1, ne, m2, op, nops, ops, ae),	\
17874   xCM_ (m1, cs, m2, op, nops, ops, ae),	\
17875   xCM_ (m1, hs, m2, op, nops, ops, ae),	\
17876   xCM_ (m1, cc, m2, op, nops, ops, ae),	\
17877   xCM_ (m1, ul, m2, op, nops, ops, ae),	\
17878   xCM_ (m1, lo, m2, op, nops, ops, ae),	\
17879   xCM_ (m1, mi, m2, op, nops, ops, ae),	\
17880   xCM_ (m1, pl, m2, op, nops, ops, ae),	\
17881   xCM_ (m1, vs, m2, op, nops, ops, ae),	\
17882   xCM_ (m1, vc, m2, op, nops, ops, ae),	\
17883   xCM_ (m1, hi, m2, op, nops, ops, ae),	\
17884   xCM_ (m1, ls, m2, op, nops, ops, ae),	\
17885   xCM_ (m1, ge, m2, op, nops, ops, ae),	\
17886   xCM_ (m1, lt, m2, op, nops, ops, ae),	\
17887   xCM_ (m1, gt, m2, op, nops, ops, ae),	\
17888   xCM_ (m1, le, m2, op, nops, ops, ae),	\
17889   xCM_ (m1, al, m2, op, nops, ops, ae)
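/* As with TxCM above, each CM use expands to nineteen ARM-only
   entries; e.g. CM ("smull","s", ...) in the table below gives
   "smulls", "smulleqs", "smullnes", ..., "smullals".  */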
17890 
17891 #define UE(mnem, op, nops, ops, ae)	\
17892   { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17893 
17894 #define UF(mnem, op, nops, ops, ae)	\
17895   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17896 
17897 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
17898    The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17899    use the same encoding function for each.  */
17900 #define NUF(mnem, op, nops, ops, enc)					\
17901   { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
17902     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17903 
17904 /* Neon data processing, version which indirects through neon_enc_tab for
17905    the various overloaded versions of opcodes.  */
17906 #define nUF(mnem, op, nops, ops, enc)					\
17907   { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
17908     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
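/* Worked example: nUF (vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel)
   from the ARMv8 FP block below expands to
     { "vseleq", { OP_RVSD, OP_RVSD, OP_RVSD, }, OT_unconditionalF,
       N_MNEM_vseleq, N_MNEM_vseleq, ARM_VARIANT, THUMB_VARIANT,
       do_vsel, do_vsel },
   the N_MNEM_* value being resolved through neon_enc_tab rather than
   used as a literal encoding.  */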
17909 
17910 /* Neon insn with conditional suffix for the ARM version, non-overloaded
17911    version.  */
17912 #define NCE_tag(mnem, op, nops, ops, enc, tag)				\
17913   { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
17914     THUMB_VARIANT, do_##enc, do_##enc }
17915 
17916 #define NCE(mnem, op, nops, ops, enc)					\
17917    NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17918 
17919 #define NCEF(mnem, op, nops, ops, enc)					\
17920     NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17921 
17922 /* Neon insn with conditional suffix for the ARM version, overloaded types.  */
17923 #define nCE_tag(mnem, op, nops, ops, enc, tag)				\
17924   { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
17925     ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17926 
17927 #define nCE(mnem, op, nops, ops, enc)					\
17928    nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17929 
17930 #define nCEF(mnem, op, nops, ops, enc)					\
17931     nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17932 
17933 #define do_0 0
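/* So an entry whose encoder field is written as plain 0 (e.g. the
   Thumb-only "cbz"/"cbnz" entries below) picks up do_0, i.e. a null
   encoding function for the variant that does not exist.  */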
17934 
17935 static const struct asm_opcode insns[] =
17936 {
17937 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
17938 #define THUMB_VARIANT &arm_ext_v4t
17939  tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
17940  tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
17941  tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
17942  tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
17943  tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
17944  tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
17945  tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
17946  tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
17947  tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
17948  tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
17949  tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
17950  tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
17951  tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
17952  tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
17953  tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
17954  tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
17955 
17956  /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17957     for setting PSR flag bits.  They are obsolete in V6 and do not
17958     have Thumb equivalents. */
17959  tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
17960  tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
17961   CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
17962  tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
17963  tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
17964   CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
17965  tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
17966  tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
17967   CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
17968 
17969  tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
17970  tC3("movs",	1b00000, _movs,	   2, (RR, SH),      mov,  t_mov_cmp),
17971  tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
17972  tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
17973 
17974  tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
17975  tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17976  tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17977 								OP_RRnpc),
17978 					OP_ADDRGLDR),ldst, t_ldst),
17979  tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17980 
17981  tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17982  tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17983  tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17984  tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17985  tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17986  tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17987 
17988  TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
17989  TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
17990  tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
17991  TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
17992 
17993   /* Pseudo ops.  */
17994  tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
17995   C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
17996  tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
17997 
17998   /* Thumb-compatibility pseudo ops.  */
17999  tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
18000  tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
18001  tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
18002  tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
18003  tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
18004  tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
18005  tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
18006  tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
18007  tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
18008  tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
18009  tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
18010  tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
18011 
18012  /* These may simplify to neg.  */
18013  TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18014  TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18015 
18016 #undef  THUMB_VARIANT
18017 #define THUMB_VARIANT  & arm_ext_v6
18018 
18019  TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
18020 
18021  /* V1 instructions with no Thumb analogue prior to V6T2.  */
18022 #undef  THUMB_VARIANT
18023 #define THUMB_VARIANT  & arm_ext_v6t2
18024 
18025  TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
18026  TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
18027   CL("teqp",	130f000,           2, (RR, SH),      cmp),
18028 
18029  TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18030  TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18031  TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
18032  TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18033 
18034  TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18035  TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18036 
18037  TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18038  TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18039 
18040  /* V1 instructions with no Thumb analogue at all.  */
18041   CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
18042   C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
18043 
18044   C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
18045   C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
18046   C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
18047   C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
18048   C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
18049   C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
18050   C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
18051   C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
18052 
18053 #undef  ARM_VARIANT
18054 #define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
18055 #undef  THUMB_VARIANT
18056 #define THUMB_VARIANT  & arm_ext_v4t
18057 
18058  tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
18059  tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
18060 
18061 #undef  THUMB_VARIANT
18062 #define THUMB_VARIANT  & arm_ext_v6t2
18063 
18064  TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18065   C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18066 
18067   /* Generic coprocessor instructions.	*/
18068  TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
18069  TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18070  TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18071  TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18072  TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18073  TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18074  TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
18075 
18076 #undef  ARM_VARIANT
18077 #define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
18078 
18079   CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18080   C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18081 
18082 #undef  ARM_VARIANT
18083 #define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
18084 #undef  THUMB_VARIANT
18085 #define THUMB_VARIANT  & arm_ext_msr
18086 
18087  TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18088  TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18089 
18090 #undef  ARM_VARIANT
18091 #define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
18092 #undef  THUMB_VARIANT
18093 #define THUMB_VARIANT  & arm_ext_v6t2
18094 
18095  TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18096   CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18097  TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18098   CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18099  TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18100   CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18101  TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18102   CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18103 
18104 #undef  ARM_VARIANT
18105 #define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
18106 #undef  THUMB_VARIANT
18107 #define THUMB_VARIANT  & arm_ext_v4t
18108 
18109  tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18110  tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18111  tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18112  tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18113  tCM("ld","sh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18114  tCM("ld","sb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18115 
18116 #undef  ARM_VARIANT
18117 #define ARM_VARIANT  & arm_ext_v4t_5
18118 
18119   /* ARM Architecture 4T.  */
18120   /* Note: bx (and blx) are required on V5, even if the processor does
18121      not support Thumb.	 */
18122  TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
18123 
18124 #undef  ARM_VARIANT
18125 #define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
18126 #undef  THUMB_VARIANT
18127 #define THUMB_VARIANT  & arm_ext_v5t
18128 
18129   /* Note: blx has 2 variants; the .value coded here is for
18130      BLX(2).  Only this variant has conditional execution.  */
18131  TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
18132  TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
18133 
18134 #undef  THUMB_VARIANT
18135 #define THUMB_VARIANT  & arm_ext_v6t2
18136 
18137  TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
18138  TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
18139  TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
18140  TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
18141  TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
18142  TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
18143  TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18144  TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18145 
18146 #undef  ARM_VARIANT
18147 #define ARM_VARIANT  & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
18148 #undef THUMB_VARIANT
18149 #define THUMB_VARIANT &arm_ext_v5exp
18150 
18151  TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18152  TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18153  TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18154  TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18155 
18156  TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18157  TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18158 
18159  TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18160  TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18161  TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18162  TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18163 
18164  TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18165  TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18166  TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18167  TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18168 
18169  TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18170  TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18171 
18172  TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18173  TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18174  TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18175  TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18176 
18177 #undef  ARM_VARIANT
18178 #define ARM_VARIANT  & arm_ext_v5e /*  ARM Architecture 5TE.  */
18179 #undef THUMB_VARIANT
18180 #define THUMB_VARIANT &arm_ext_v6t2
18181 
18182  TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
18183  TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18184      ldrd, t_ldstd),
18185  TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18186 				       ADDRGLDRS), ldrd, t_ldstd),
18187 
18188  TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18189  TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18190 
18191 #undef  ARM_VARIANT
18192 #define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
18193 
18194  TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
18195 
18196 #undef  ARM_VARIANT
18197 #define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
18198 #undef  THUMB_VARIANT
18199 #define THUMB_VARIANT  & arm_ext_v6
18200 
18201  TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
18202  TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
18203  tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18204  tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18205  tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18206  tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18207  tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18208  tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18209  tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18210  TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
18211 
18212 #undef  THUMB_VARIANT
18213 #define THUMB_VARIANT  & arm_ext_v6t2
18214 
18215  TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
18216  TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18217 				      strex,  t_strex),
18218  TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18219  TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18220 
18221  TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
18222  TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
18223 
18224 /*  ARM V6 not included in V7M.  */
18225 #undef  THUMB_VARIANT
18226 #define THUMB_VARIANT  & arm_ext_v6_notm
18227  TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18228  TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18229   UF(rfeib,	9900a00,           1, (RRw),			   rfe),
18230   UF(rfeda,	8100a00,           1, (RRw),			   rfe),
18231  TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18232  TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18233   UF(rfefa,	8100a00,           1, (RRw),			   rfe),
18234  TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18235   UF(rfeed,	9900a00,           1, (RRw),			   rfe),
18236  TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18237  TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18238  TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18239   UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
18240   UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
18241   UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
18242   UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
18243  TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
18244  TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
18245 
18246 /*  ARM V6 not included in V7M (e.g. integer SIMD).  */
18247 #undef  THUMB_VARIANT
18248 #define THUMB_VARIANT  & arm_ext_v6_dsp
18249  TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
18250  TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
18251  TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
18252  TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18253  TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18254  TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18255  /* Old name for QASX.  */
18256  TCE("qaddsubx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18257  TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18258  /* Old name for QSAX.  */
18259  TCE("qsubaddx",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18260  TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18261  TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18262  TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18263  TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18264  TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18265  /* Old name for SASX.  */
18266  TCE("saddsubx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18267  TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18268  TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18269  TCE("shasx",     6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18270  /* Old name for SHASX.  */
18271  TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18272  TCE("shsax",      6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18273  /* Old name for SHSAX.  */
18274  TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18275  TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18276  TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18277  TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18278  /* Old name for SSAX.  */
18279  TCE("ssubaddx",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18280  TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18281  TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18282  TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18283  TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18284  TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18285  /* Old name for UASX.  */
18286  TCE("uaddsubx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18287  TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18288  TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18289  TCE("uhasx",     6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18290  /* Old name for UHASX.  */
18291  TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18292  TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18293  /* Old name for UHSAX.  */
18294  TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18295  TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18296  TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18297  TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18298  TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18299  TCE("uqasx",     6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18300  /* Old name for UQASX.  */
18301  TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18302  TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18303  /* Old name for UQSAX.  */
18304  TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18305  TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18306  TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18307  TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18308  TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18309  /* Old name for USAX.  */
18310  TCE("usubaddx",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18311  TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18312  TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18313  TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18314  TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18315  TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
18316  TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18317  TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18318  TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18319  TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
18320  TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18321  TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18322  TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18323  TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18324  TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18325  TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18326  TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18327  TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18328  TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18329  TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18330  TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18331  TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18332  TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18333  TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18334  TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18335  TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18336  TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18337  TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18338  TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18339  TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
18340  TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
18341  TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
18342  TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
18343  TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
18344 
18345 #undef  ARM_VARIANT
18346 #define ARM_VARIANT   & arm_ext_v6k
18347 #undef  THUMB_VARIANT
18348 #define THUMB_VARIANT & arm_ext_v6k
18349 
18350  tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
18351  tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
18352  tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
18353  tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
18354 
18355 #undef  THUMB_VARIANT
18356 #define THUMB_VARIANT  & arm_ext_v6_notm
18357  TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18358 				      ldrexd, t_ldrexd),
18359  TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18360 				       RRnpcb), strexd, t_strexd),
18361 
18362 #undef  THUMB_VARIANT
18363 #define THUMB_VARIANT  & arm_ext_v6t2
18364  TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18365      rd_rn,  rd_rn),
18366  TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18367      rd_rn,  rd_rn),
18368  TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18369      strex, t_strexbh),
18370  TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18371      strex, t_strexbh),
18372  TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
18373 
18374 #undef  ARM_VARIANT
18375 #define ARM_VARIANT    & arm_ext_sec
18376 #undef THUMB_VARIANT
18377 #define THUMB_VARIANT  & arm_ext_sec
18378 
18379  TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
18380 
18381 #undef	ARM_VARIANT
18382 #define	ARM_VARIANT    & arm_ext_virt
18383 #undef	THUMB_VARIANT
18384 #define	THUMB_VARIANT    & arm_ext_virt
18385 
18386  TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18387  TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
18388 
18389 #undef  ARM_VARIANT
18390 #define ARM_VARIANT  & arm_ext_v6t2
18391 #undef  THUMB_VARIANT
18392 #define THUMB_VARIANT  & arm_ext_v6t2
18393 
18394  TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
18395  TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18396  TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
18397  TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
18398 
18399  TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18400  TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
18401  TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
18402  TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
18403 
18404  TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18405  TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18406  TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18407  TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18408 
18409  /* Thumb-only instructions.  */
18410 #undef ARM_VARIANT
18411 #define ARM_VARIANT NULL
18412   TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
18413   TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
18414 
18415  /* ARM does not really have an IT instruction, so always allow it.
18416     The opcode is copied from Thumb in order to allow warnings in
18417     -mimplicit-it=[never | arm] modes.  */
18418 #undef  ARM_VARIANT
18419 #define ARM_VARIANT  & arm_ext_v1
18420 
18421  TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
18422  TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
18423  TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
18424  TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
18425  TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
18426  TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
18427  TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
18428  TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
18429  TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
18430  TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
18431  TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
18432  TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
18433  TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
18434  TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
18435  TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
18436  /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
18437  TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18438  TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18439 
18440  /* Thumb2 only instructions.  */
18441 #undef  ARM_VARIANT
18442 #define ARM_VARIANT  NULL
18443 
18444  TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18445  TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18446  TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
18447  TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
18448  TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
18449  TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
18450 
18451  /* Hardware division instructions.  */
18452 #undef  ARM_VARIANT
18453 #define ARM_VARIANT    & arm_ext_adiv
18454 #undef  THUMB_VARIANT
18455 #define THUMB_VARIANT  & arm_ext_div
18456 
18457  TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18458  TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18459 
18460  /* ARM V6M/V7 instructions.  */
18461 #undef  ARM_VARIANT
18462 #define ARM_VARIANT    & arm_ext_barrier
18463 #undef  THUMB_VARIANT
18464 #define THUMB_VARIANT  & arm_ext_barrier
18465 
18466  TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier,  t_barrier),
18467  TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier,  t_barrier),
18468  TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier,  t_barrier),
18469 
18470  /* ARM V7 instructions.  */
18471 #undef  ARM_VARIANT
18472 #define ARM_VARIANT    & arm_ext_v7
18473 #undef  THUMB_VARIANT
18474 #define THUMB_VARIANT  & arm_ext_v7
18475 
18476  TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
18477  TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
18478 
18479 #undef ARM_VARIANT
18480 #define ARM_VARIANT    & arm_ext_mp
18481 #undef THUMB_VARIANT
18482 #define THUMB_VARIANT  & arm_ext_mp
18483 
18484  TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
18485 
18486  /* ARMv8 instructions.  */
18487 #undef  ARM_VARIANT
18488 #define ARM_VARIANT   & arm_ext_v8
18489 #undef  THUMB_VARIANT
18490 #define THUMB_VARIANT & arm_ext_v8
18491 
18492  tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
18493  TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
18494  TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
18495  TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18496 							ldrexd, t_ldrexd),
18497  TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
18498  TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
18499  TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18500 							stlex,  t_stlex),
18501  TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18502 							strexd, t_strexd),
18503  TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18504 							stlex, t_stlex),
18505  TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18506 							stlex, t_stlex),
18507  TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
18508  TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
18509  TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
18510  TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
18511  TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
18512  TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
18513 
18514  /* ARMv8 T32 only.  */
18515 #undef ARM_VARIANT
18516 #define ARM_VARIANT  NULL
18517  TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
18518  TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
18519  TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
18520 
18521   /* FP for ARMv8.  */
18522 #undef  ARM_VARIANT
18523 #define ARM_VARIANT & fpu_vfp_ext_armv8
18524 #undef  THUMB_VARIANT
18525 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18526 
18527   nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
18528   nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
18529   nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
18530   nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
18531   nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
18532   nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
18533   nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
18534   nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
18535   nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
18536   nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
18537   nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
18538   nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
18539   nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
18540   nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
18541   nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
18542   nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
18543   nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
18544 
18545   /* Crypto v1 extensions.  */
18546 #undef  ARM_VARIANT
18547 #define ARM_VARIANT & fpu_crypto_ext_armv8
18548 #undef  THUMB_VARIANT
18549 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18550 
18551   nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18552   nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18553   nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18554   nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18555   nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18556   nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18557   nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18558   nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18559   nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18560   nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18561   nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18562   nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18563   nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18564   nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18565 
18566 #undef  ARM_VARIANT
18567 #define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
18568 #undef  THUMB_VARIANT
18569 #define THUMB_VARIANT NULL
18570 
18571  cCE("wfs",	e200110, 1, (RR),	     rd),
18572  cCE("rfs",	e300110, 1, (RR),	     rd),
18573  cCE("wfc",	e400110, 1, (RR),	     rd),
18574  cCE("rfc",	e500110, 1, (RR),	     rd),
18575 
18576  cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18577  cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18578  cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18579  cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18580 
18581  cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18582  cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18583  cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18584  cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
18585 
18586  cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
18587  cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
18588  cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
18589  cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
18590  cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
18591  cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
18592  cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
18593  cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
18594  cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
18595  cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
18596  cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
18597  cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
18598 
18599  cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
18600  cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
18601  cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
18602  cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
18603  cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
18604  cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
18605  cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
18606  cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
18607  cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
18608  cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
18609  cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
18610  cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
18611 
18612  cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
18613  cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
18614  cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
18615  cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
18616  cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
18617  cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
18618  cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
18619  cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
18620  cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
18621  cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
18622  cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
18623  cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
18624 
18625  cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
18626  cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
18627  cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
18628  cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
18629  cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
18630  cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
18631  cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
18632  cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
18633  cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
18634  cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
18635  cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
18636  cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
18637 
18638  cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
18639  cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
18640  cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
18641  cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
18642  cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
18643  cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
18644  cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
18645  cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
18646  cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
18647  cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
18648  cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
18649  cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
18650 
18651  cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
18652  cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
18653  cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
18654  cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
18655  cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
18656  cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
18657  cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
18658  cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
18659  cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
18660  cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
18661  cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
18662  cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
18663 
18664  cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
18665  cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
18666  cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
18667  cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
18668  cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
18669  cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
18670  cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
18671  cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
18672  cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
18673  cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
18674  cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
18675  cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
18676 
18677  cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
18678  cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
18679  cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
18680  cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
18681  cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
18682  cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
18683  cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
18684  cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
18685  cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
18686  cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
18687  cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
18688  cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
18689 
18690  cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
18691  cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
18692  cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
18693  cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
18694  cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
18695  cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
18696  cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
18697  cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
18698  cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
18699  cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
18700  cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
18701  cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
18702 
18703  cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
18704  cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
18705  cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
18706  cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
18707  cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
18708  cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
18709  cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
18710  cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
18711  cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
18712  cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
18713  cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
18714  cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
18715 
18716  cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
18717  cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
18718  cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
18719  cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
18720  cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
18721  cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
18722  cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
18723  cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
18724  cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
18725  cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
18726  cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
18727  cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
18728 
18729  cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
18730  cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
18731  cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
18732  cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
18733  cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
18734  cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
18735  cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
18736  cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
18737  cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
18738  cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
18739  cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
18740  cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
18741 
18742  cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
18743  cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
18744  cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
18745  cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
18746  cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
18747  cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
18748  cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
18749  cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
18750  cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
18751  cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
18752  cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
18753  cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
18754 
18755  cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
18756  cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
18757  cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
18758  cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
18759  cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
18760  cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
18761  cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
18762  cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
18763  cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
18764  cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
18765  cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
18766  cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
18767 
18768  cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
18769  cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
18770  cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
18771  cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
18772  cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
18773  cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
18774  cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
18775  cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
18776  cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
18777  cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
18778  cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
18779  cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
18780 
18781  cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
18782  cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
18783  cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
18784  cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
18785  cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
18786  cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
18787  cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
18788  cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
18789  cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
18790  cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
18791  cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
18792  cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
18793 
18794  cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
18795  cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
18796  cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
18797  cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
18798  cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
18799  cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18800  cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18801  cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18802  cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
18803  cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
18804  cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
18805  cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
18806 
18807  cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
18808  cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
18809  cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
18810  cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
18811  cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
18812  cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18813  cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18814  cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18815  cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
18816  cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
18817  cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
18818  cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
18819 
18820  cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
18821  cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
18822  cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
18823  cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
18824  cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
18825  cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18826  cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18827  cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18828  cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
18829  cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
18830  cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
18831  cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
18832 
18833  cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
18834  cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
18835  cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
18836  cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
18837  cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
18838  cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18839  cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18840  cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18841  cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
18842  cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
18843  cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
18844  cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
18845 
18846  cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
18847  cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
18848  cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
18849  cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
18850  cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
18851  cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18852  cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18853  cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18854  cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
18855  cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
18856  cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
18857  cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
18858 
18859  cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
18860  cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
18861  cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
18862  cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
18863  cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
18864  cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18865  cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18866  cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18867  cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
18868  cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18869  cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18870  cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18871 
18872  cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18873  cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18874  cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18875  cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18876  cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18877  cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18878  cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18879  cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18880  cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18881  cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18882  cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18883  cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18884 
18885  cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18886  cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18887  cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18888  cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18889  cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18890  cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18891  cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18892  cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18893  cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18894  cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18895  cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18896  cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18897 
18898  cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18899  cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18900  cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18901  cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18902  cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18903  cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18904  cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18905  cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18906  cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18907  cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18908  cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18909  cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18910 
18911  cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18912  cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18913  cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18914  cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18915  cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18916  cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18917  cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18918  cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18919  cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18920  cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18921  cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18922  cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18923 
18924  cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18925  cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18926  cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18927  cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18928  cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18929  cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18930  cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18931  cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18932  cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18933  cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18934  cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18935  cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18936 
18937  cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18938  cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18939  cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18940  cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18941  cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18942  cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18943  cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18944  cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18945  cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18946  cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18947  cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18948  cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18949 
18950  cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18951  cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18952  cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18953  cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18954  cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18955  cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18956  cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18957  cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18958  cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18959  cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18960  cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18961  cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18962 
18963  cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
18964  C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
18965  cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
18966  C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
18967 
18968  cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
18969  cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
18970  cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
18971  cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
18972  cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
18973  cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
18974  cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
18975  cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
18976  cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
18977  cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
18978  cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
18979  cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
18980 
18981   /* The implementation of the FIX instruction is broken on some
18982      assemblers, in that it accepts a precision specifier as well as a
18983      rounding specifier, despite the fact that this is meaningless.
18984      To be more compatible, we accept it as well, though of course it
18985      does not set any bits.  */
18986  cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
18987  cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
18988  cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
18989  cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
18990  cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
18991  cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
18992  cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
18993  cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
18994  cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
18995  cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
18996  cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
18997  cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
18998  cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
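  /* A hedged usage sketch (the register choices are illustrative
     assumptions): because the precision letter sets no bits, both
     spellings below would be expected to produce the same encoding.

	 fixz	r0, f1		@ rounding specifier only
	 fixsz	r0, f1		@ precision letter accepted for compatibility  */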
18999 
19000   /* Instructions that were new with the real FPA, call them V2.  */
19001 #undef  ARM_VARIANT
19002 #define ARM_VARIANT  & fpu_fpa_ext_v2
19003 
19004  cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19005  cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19006  cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19007  cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19008  cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19009  cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19010 
19011 #undef  ARM_VARIANT
19012 #define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
19013 
19014   /* Moves and type conversions.  */
19015  cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19016  cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
19017  cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
19018  cCE("fmstat",	ef1fa10, 0, (),		      noargs),
19019  cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
19020  cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
19021  cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19022  cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19023  cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19024  cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19025  cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19026  cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19027  cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
19028  cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
19029 
19030   /* Memory operations.	 */
19031  cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
19032  cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
19033  cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19034  cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19035  cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19036  cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19037  cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19038  cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19039  cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19040  cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19041  cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19042  cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19043  cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19044  cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19045  cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19046  cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19047  cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19048  cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19049 
19050   /* Monadic operations.  */
19051  cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19052  cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19053  cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19054 
19055   /* Dyadic operations.	 */
19056  cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19057  cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19058  cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19059  cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19060  cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19061  cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19062  cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19063  cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19064  cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19065 
19066   /* Comparisons.  */
19067  cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19068  cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
19069  cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19070  cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
19071 
19072  /* Double precision load/store are still present on single precision
19073     implementations.  */
19074  cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
19075  cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
19076  cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19077  cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19078  cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19079  cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19080  cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19081  cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19082  cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19083  cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19084 
19085 #undef  ARM_VARIANT
19086 #define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
19087 
19088   /* Moves and type conversions.  */
19089  cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19090  cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19091  cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19092  cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
19093  cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
19094  cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
19095  cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
19096  cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19097  cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19098  cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19099  cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19100  cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19101  cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19102 
19103   /* Monadic operations.  */
19104  cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19105  cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19106  cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19107 
19108   /* Dyadic operations.	 */
19109  cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19110  cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19111  cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19112  cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19113  cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19114  cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19115  cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19116  cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19117  cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19118 
19119   /* Comparisons.  */
19120  cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19121  cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
19122  cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19123  cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
19124 
19125 #undef  ARM_VARIANT
19126 #define ARM_VARIANT  & fpu_vfp_ext_v2
19127 
19128  cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19129  cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19130  cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
19131  cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
19132 
19133 /* Instructions which may belong to either the Neon or VFP instruction sets.
19134    Individual encoder functions perform additional architecture checks.  */
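/* A hedged sketch of how the sharing works in practice (the register
   choices below are illustrative assumptions): the operand kinds select
   the instruction set, e.g.

	vadd.f32	s0, s1, s2	@ S registers -> VFP encoding
	vadd.i32	d0, d1, d2	@ D/Q registers -> Neon encoding  */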
19135 #undef  ARM_VARIANT
19136 #define ARM_VARIANT    & fpu_vfp_ext_v1xd
19137 #undef  THUMB_VARIANT
19138 #define THUMB_VARIANT  & fpu_vfp_ext_v1xd
19139 
19140   /* These mnemonics are unique to VFP.  */
19141  NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
19142  NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19143  nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19144  nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19145  nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19146  nCE(vcmp,      _vcmp,    2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
19147  nCE(vcmpe,     _vcmpe,   2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
19148  NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
19149  NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
19150  NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
19151 
19152   /* Mnemonics shared by Neon and VFP.  */
19153  nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19154  nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19155  nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19156 
19157  nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19158  nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19159 
19160  NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19161  NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19162 
19163  NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19164  NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19165  NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19166  NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19167  NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19168  NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19169  NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19170  NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19171 
19172  nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19173  nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
19174  NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19175  NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19176 
19177 
19178   /* NOTE: All VMOV encoding is special-cased!  */
19179  NCE(vmov,      0,       1, (VMOV), neon_mov),
19180  NCE(vmovq,     0,       1, (VMOV), neon_mov),
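  /* A hedged illustration of why VMOV needs special-casing: the one
     mnemonic spans several unrelated encodings (operands below are
     illustrative assumptions).

	vmov	d0, d1		@ register copy
	vmov.i32 q0, #1		@ Neon immediate form
	vmov	r0, s0		@ single VFP register <-> core register
	vmov	r0, r1, d0	@ D register <-> two core registers  */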
19181 
19182 #undef  THUMB_VARIANT
19183 #define THUMB_VARIANT  & fpu_neon_ext_v1
19184 #undef  ARM_VARIANT
19185 #define ARM_VARIANT    & fpu_neon_ext_v1
19186 
19187   /* Data processing with three registers of the same length.  */
19188   /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
19189  NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
19190  NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
19191  NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19192  NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19193  NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19194  NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19195  NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19196  NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19197   /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
19198  NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19199  NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
19200  NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19201  NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
19202  NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19203  NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
19204  NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19205  NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
19206   /* If not immediate, fall back to neon_dyadic_i64_su.
19207      shl_imm should accept I8 I16 I32 I64,
19208      qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
19209  nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19210  nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
19211  nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19212  nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
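  /* Illustrative only (registers and shift counts are assumptions):

	vshl.i32	d0, d1, #3	@ immediate form, handled as shl_imm
	vshl.s32	d0, d1, d2	@ register form, falls back as noted above  */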
19213   /* Logic ops, types optional & ignored.  */
19214  nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19215  nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19216  nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19217  nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19218  nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19219  nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19220  nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19221  nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19222  nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
19223  nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
19224   /* Bitfield ops, untyped.  */
19225  NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19226  NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19227  NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19228  NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19229  NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19230  NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19231   /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
19232  nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19233  nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19234  nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19235  nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19236  nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19237  nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19238   /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19239      back to neon_dyadic_if_su.  */
19240  nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19241  nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
19242  nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19243  nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
19244  nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19245  nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
19246  nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19247  nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
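  /* A brief sketch (operands assumed): the immediate form compares
     against zero, anything else is a plain register compare.  VCLT/VCLE
     with two registers presumably reuse the VCGT/VCGE encodings with the
     operands swapped (hence the _inv encoders).

	vcge.s32	d0, d1, d2	@ register compare
	vcge.s32	d0, d1, #0	@ compare against zero  */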
19248   /* Comparison. Type I8 I16 I32 F32.  */
19249  nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19250  nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
19251   /* As above, D registers only.  */
19252  nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
19253  nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
19254   /* Int and float variants, signedness unimportant.  */
19255  nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
19256  nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
19257  nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
19258   /* Add/sub take types I8 I16 I32 I64 F32.  */
19259  nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
19260  nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
19261   /* vtst takes sizes 8, 16, 32.  */
19262  NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19263  NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
19264   /* VMUL takes I8 I16 I32 F32 P8.  */
19265  nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
19266   /* VQD{R}MULH takes S16 S32.  */
19267  nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19268  nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19269  nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19270  nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19271  NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19272  NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
19273  NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19274  NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
19275  NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19276  NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
19277  NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19278  NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
19279  NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
19280  NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
19281  NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
19282  NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
19283 
19284   /* Two address, int/float. Types S8 S16 S32 F32.  */
19285  NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
19286  NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
19287 
19288   /* Data processing with two registers and a shift amount.  */
19289   /* Right shifts, and variants with rounding.
19290      Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
19291  NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19292  NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
19293  NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19294  NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
19295  NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
19296  NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
19297  NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
19298  NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
19299   /* Shift and insert. Sizes accepted 8 16 32 64.  */
19300  NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19301  NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
19302  NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19303  NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
19304   /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
19305  NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19306  NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
19307   /* Right shift immediate, saturating & narrowing, with rounding variants.
19308      Types accepted S16 S32 S64 U16 U32 U64.  */
19309  NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19310  NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19311   /* As above, unsigned. Types accepted S16 S32 S64.  */
19312  NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19313  NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19314   /* Right shift narrowing. Types accepted I16 I32 I64.  */
19315  NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19316  NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19317   /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
19318  nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
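  /* Illustrative only (registers and shift counts are assumptions):

	vshll.s16	q0, d0, #4	@ ordinary widening shift
	vshll.i16	q0, d0, #16	@ "max shift" variant (shift == element size)  */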
19319   /* CVT with optional immediate for fixed-point variant.  */
19320  nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
19321 
19322  nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
19323  nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
19324 
19325   /* Data processing, three registers of different lengths.  */
19326   /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
19327  NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
19328  NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
19329  NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
19330  NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
19331   /* If not scalar, fall back to neon_dyadic_long.
19332      Vector types as above, scalar types S16 S32 U16 U32.  */
19333  nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19334  nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
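  /* Illustrative only (registers and the lane index are assumptions):

	vmlal.s16	q0, d1, d2	@ vector form, neon_dyadic_long path
	vmlal.s16	q0, d1, d3[2]	@ scalar form (S16 scalar)  */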
19335   /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
19336  NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19337  NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19338   /* Dyadic, narrowing insns. Types I16 I32 I64.  */
19339  NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
19340  NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
19341  NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
19342  NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
19343   /* Saturating doubling multiplies. Types S16 S32.  */
19344  nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19345  nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19346  nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19347   /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19348      S16 S32 U16 U32.  */
19349  nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
19350 
19351   /* Extract. Size 8.  */
19352  NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19353  NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
19354 
19355   /* Two registers, miscellaneous.  */
19356   /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
19357  NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
19358  NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
19359  NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
19360  NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
19361  NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
19362  NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
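  /* Illustration (operands assumed): the element size must be strictly
     smaller than the region being reversed, so for example:

	vrev64.8	d0, d1		@ accepted: 8 < 64
	vrev16.16	d0, d1		@ rejected: 16 is not < 16  */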
19363   /* Vector replicate. Sizes 8 16 32.  */
19364  nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
19365  nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
19366   /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
19367  NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
19368   /* VMOVN. Types I16 I32 I64.  */
19369  nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
19370   /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
19371  nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
19372   /* VQMOVUN. Types S16 S32 S64.  */
19373  nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
19374   /* VZIP / VUZP. Sizes 8 16 32.  */
19375  NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
19376  NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
19377  NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
19378  NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
19379   /* VQABS / VQNEG. Types S8 S16 S32.  */
19380  NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
19381  NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
19382  NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
19383  NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
19384   /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
19385  NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
19386  NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
19387  NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
19388  NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
19389   /* Reciprocal estimates. Types U32 F32.  */
19390  NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
19391  NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
19392  NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
19393  NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
19394   /* VCLS. Types S8 S16 S32.  */
19395  NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
19396  NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
19397   /* VCLZ. Types I8 I16 I32.  */
19398  NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
19399  NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
19400   /* VCNT. Size 8.  */
19401  NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
19402  NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
19403   /* Two address, untyped.  */
19404  NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
19405  NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
19406   /* VTRN. Sizes 8 16 32.  */
19407  nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
19408  nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
19409 
19410   /* Table lookup. Size 8.  */
19411  NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19412  NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19413 
19414 #undef  THUMB_VARIANT
19415 #define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
19416 #undef  ARM_VARIANT
19417 #define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
19418 
19419   /* Neon element/structure load/store.  */
19420  nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19421  nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19422  nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19423  nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19424  nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19425  nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19426  nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19427  nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
19428 
19429 #undef  THUMB_VARIANT
19430 #define THUMB_VARIANT  & fpu_vfp_ext_v3xd
19431 #undef  ARM_VARIANT
19432 #define ARM_VARIANT    & fpu_vfp_ext_v3xd
19433  cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
19434  cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
19435  cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
19436  cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
19437  cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
19438  cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
19439  cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
19440  cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
19441  cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
19442 
19443 #undef  THUMB_VARIANT
19444 #define THUMB_VARIANT  & fpu_vfp_ext_v3
19445 #undef  ARM_VARIANT
19446 #define ARM_VARIANT    & fpu_vfp_ext_v3
19447 
19448  cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
19449  cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
19450  cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
19451  cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
19452  cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
19453  cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
19454  cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
19455  cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
19456  cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
19457 
19458 #undef  ARM_VARIANT
19459 #define ARM_VARIANT    & fpu_vfp_ext_fma
19460 #undef  THUMB_VARIANT
19461 #define THUMB_VARIANT  & fpu_vfp_ext_fma
19462  /* Mnemonics shared by Neon and VFP.  These are included in the
19463     VFP FMA variant; NEON and VFP FMA always includes the NEON
19464     FMA instructions.  */
19465  nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19466  nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19467  /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19468     the v form should always be used.  */
19469  cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19470  cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19471  cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19472  cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19473  nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19474  nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
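 /* A hedged usage sketch (registers are illustrative assumptions); per the
    note above, the "v" spellings are the ones meant to be written:

	vfma.f32	s0, s1, s2	@ VFP FMA
	vfma.f32	q0, q1, q2	@ Neon FMA  */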
19475 
19476 #undef THUMB_VARIANT
19477 #undef  ARM_VARIANT
19478 #define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
19479 
19480  cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19481  cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19482  cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19483  cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19484  cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19485  cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19486  cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19487  cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19488 
19489 #undef  ARM_VARIANT
19490 #define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
19491 
19492  cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
19493  cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
19494  cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
19495  cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
19496  cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
19497  cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
19498  cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
19499  cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
19500  cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
19501  cCE("textrmub",	e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19502  cCE("textrmuh",	e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19503  cCE("textrmuw",	e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19504  cCE("textrmsb",	e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19505  cCE("textrmsh",	e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19506  cCE("textrmsw",	e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
19507  cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
19508  cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
19509  cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
19510  cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
19511  cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
19512  cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19513  cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19514  cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19515  cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19516  cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19517  cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
19518  cCE("tmovmskb",	e100030, 2, (RR, RIWR),		    rd_rn),
19519  cCE("tmovmskh",	e500030, 2, (RR, RIWR),		    rd_rn),
19520  cCE("tmovmskw",	e900030, 2, (RR, RIWR),		    rd_rn),
19521  cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
19522  cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
19523  cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
19524  cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
19525  cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
19526  cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
19527  cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
19528  cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
19529  cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19530  cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19531  cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19532  cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19533  cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19534  cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19535  cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19536  cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19537  cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19538  cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19539  cCE("walignr0",	e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19540  cCE("walignr1",	e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19541  cCE("walignr2",	ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19542  cCE("walignr3",	eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19543  cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19544  cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19545  cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19546  cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19547  cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19548  cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19549  cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19550  cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19551  cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19552  cCE("wcmpgtub",	e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19553  cCE("wcmpgtuh",	e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19554  cCE("wcmpgtuw",	e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19555  cCE("wcmpgtsb",	e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19556  cCE("wcmpgtsh",	e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19557  cCE("wcmpgtsw",	eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19558  cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
19559  cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
19560  cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
19561  cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
19562  cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19563  cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19564  cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19565  cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19566  cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19567  cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19568  cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19569  cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19570  cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19571  cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19572  cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19573  cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19574  cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19575  cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19576  cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19577  cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19578  cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19579  cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19580  cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
19581  cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19582  cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19583  cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19584  cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19585  cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19586  cCE("wpackhss",	e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19587  cCE("wpackhus",	e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19588  cCE("wpackwss",	eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19589  cCE("wpackwus",	e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19590  cCE("wpackdss",	ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19591  cCE("wpackdus",	ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19592  cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19593  cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19594  cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19595  cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19596  cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19597  cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19598  cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19599  cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19600  cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19601  cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19602  cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
19603  cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19604  cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19605  cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19606  cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19607  cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19608  cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19609  cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19610  cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19611  cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19612  cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19613  cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19614  cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19615  cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19616  cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19617  cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19618  cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19619  cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19620  cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
19621  cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
19622  cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
19623  cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
19624  cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
19625  cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19626  cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19627  cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19628  cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19629  cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19630  cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19631  cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19632  cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19633  cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19634  cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
19635  cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
19636  cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
19637  cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
19638  cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
19639  cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
19640  cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19641  cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19642  cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19643  cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
19644  cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
19645  cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
19646  cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
19647  cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
19648  cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
19649  cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19650  cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19651  cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19652  cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
19653  cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
19654 
19655 #undef  ARM_VARIANT
19656 #define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
19657 
19658  cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
19659  cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
19660  cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
19661  cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
19662  cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
19663  cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
19664  cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19665  cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19666  cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19667  cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19668  cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19669  cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19670  cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19671  cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19672  cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19673  cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19674  cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19675  cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19676  cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19677  cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19678  cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
19679  cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19680  cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19681  cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19682  cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19683  cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19684  cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19685  cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19686  cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19687  cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19688  cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19689  cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19690  cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19691  cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19692  cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19693  cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19694  cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19695  cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19696  cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19697  cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19698  cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19699  cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19700  cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19701  cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19702  cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19703  cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19704  cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19705  cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19706  cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19707  cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19708  cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19709  cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19710  cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19711  cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19712  cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19713  cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19714  cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
19715 
19716 #undef  ARM_VARIANT
19717 #define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
19718 
19719  cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
19720  cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
19721  cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
19722  cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
19723  cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
19724  cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
19725  cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
19726  cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
19727  cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
19728  cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
19729  cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
19730  cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
19731  cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
19732  cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
19733  cCE("cfmv64lr",	e000510, 2, (RMDX, RR),		      rn_rd),
19734  cCE("cfmvr64l",	e100510, 2, (RR, RMDX),		      rd_rn),
19735  cCE("cfmv64hr",	e000530, 2, (RMDX, RR),		      rn_rd),
19736  cCE("cfmvr64h",	e100530, 2, (RR, RMDX),		      rd_rn),
19737  cCE("cfmval32",	e200440, 2, (RMAX, RMFX),	      rd_rn),
19738  cCE("cfmv32al",	e100440, 2, (RMFX, RMAX),	      rd_rn),
19739  cCE("cfmvam32",	e200460, 2, (RMAX, RMFX),	      rd_rn),
19740  cCE("cfmv32am",	e100460, 2, (RMFX, RMAX),	      rd_rn),
19741  cCE("cfmvah32",	e200480, 2, (RMAX, RMFX),	      rd_rn),
19742  cCE("cfmv32ah",	e100480, 2, (RMFX, RMAX),	      rd_rn),
19743  cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
19744  cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
19745  cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
19746  cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
19747  cCE("cfmvsc32",	e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
19748  cCE("cfmv32sc",	e1004e0, 2, (RMDX, RMDS),	      rd),
19749  cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
19750  cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
19751  cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
19752  cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
19753  cCE("cfcvt32s",	e000480, 2, (RMF, RMFX),	      rd_rn),
19754  cCE("cfcvt32d",	e0004a0, 2, (RMD, RMFX),	      rd_rn),
19755  cCE("cfcvt64s",	e0004c0, 2, (RMF, RMDX),	      rd_rn),
19756  cCE("cfcvt64d",	e0004e0, 2, (RMD, RMDX),	      rd_rn),
19757  cCE("cfcvts32",	e100580, 2, (RMFX, RMF),	      rd_rn),
19758  cCE("cfcvtd32",	e1005a0, 2, (RMFX, RMD),	      rd_rn),
19759  cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
19760  cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
19761  cCE("cfrshl32",	e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
19762  cCE("cfrshl64",	e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
19763  cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
19764  cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
19765  cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
19766  cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
19767  cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
19768  cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
19769  cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
19770  cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
19771  cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
19772  cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
19773  cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
19774  cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
19775  cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
19776  cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
19777  cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
19778  cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
19779  cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
19780  cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
19781  cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
19782  cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
19783  cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
19784  cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
19785  cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
19786  cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
19787  cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
19788  cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
19789  cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
19790  cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
19791  cCE("cfmadd32",	e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19792  cCE("cfmsub32",	e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19793  cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19794  cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19795 };
19796 #undef ARM_VARIANT
19797 #undef THUMB_VARIANT
19798 #undef TCE
19799 #undef TCM
19800 #undef TUE
19801 #undef TUF
19802 #undef TCC
19803 #undef cCE
19804 #undef cCL
19805 #undef C3E
19806 #undef CE
19807 #undef CM
19808 #undef UE
19809 #undef UF
19810 #undef UT
19811 #undef NUF
19812 #undef nUF
19813 #undef NCE
19814 #undef nCE
19815 #undef OPS0
19816 #undef OPS1
19817 #undef OPS2
19818 #undef OPS3
19819 #undef OPS4
19820 #undef OPS5
19821 #undef OPS6
19822 #undef do_0
19823 
19824 /* MD interface: bits in the object file.  */
19825 
19826 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
19827    for use in the object file, and store them in the array pointed to by buf.
19828    This knows about the endianness of the target machine and does
19829    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
19830    2 (short) and 4 (long).  Floating-point numbers are put out as a series
19831    of LITTLENUMS (shorts, here at least).  */
19832 
19833 void
19834 md_number_to_chars (char * buf, valueT val, int n)
19835 {
19836   if (target_big_endian)
19837     number_to_chars_bigendian (buf, val, n);
19838   else
19839     number_to_chars_littleendian (buf, val, n);
19840 }
19841 
19842 static valueT
19843 md_chars_to_number (char * buf, int n)
19844 {
19845   valueT result = 0;
19846   unsigned char * where = (unsigned char *) buf;
19847 
19848   if (target_big_endian)
19849     {
19850       while (n--)
19851 	{
19852 	  result <<= 8;
19853 	  result |= (*where++ & 255);
19854 	}
19855     }
19856   else
19857     {
19858       while (n--)
19859 	{
19860 	  result <<= 8;
19861 	  result |= (where[n] & 255);
19862 	}
19863     }
19864 
19865   return result;
19866 }
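
/* Illustrative example (values chosen for exposition): with BUF holding
   the bytes { 0x12, 0x34 } and N == 2, md_chars_to_number returns 0x1234
   on a big-endian target and 0x3412 on a little-endian one, while
   md_number_to_chars (buf, 0x1234, 2) writes { 0x12, 0x34 } or
   { 0x34, 0x12 } respectively.  */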
19867 
19868 /* MD interface: Sections.  */
19869 
19870 /* Calculate the maximum variable size (i.e., excluding fr_fix)
19871    that an rs_machine_dependent frag may reach.  */
19872 
19873 unsigned int
19874 arm_frag_max_var (fragS *fragp)
19875 {
19876   /* We only use rs_machine_dependent for variable-size Thumb instructions,
19877      which are either THUMB_SIZE (2) or INSN_SIZE (4).
19878 
19879      Note that we generate relaxable instructions even for cases that don't
19880      really need it, like an immediate that's a trivial constant.  So we're
19881      overestimating the instruction size for some of those cases.  Rather
19882      than putting more intelligence here, it would probably be better to
19883      avoid generating a relaxation frag in the first place when it can be
19884      determined up front that a short instruction will suffice.  */
19885 
19886   gas_assert (fragp->fr_type == rs_machine_dependent);
19887   return INSN_SIZE;
19888 }
19889 
19890 /* Estimate the size of a frag before relaxing.  Assume everything fits in
19891    2 bytes.  */
19892 
19893 int
19894 md_estimate_size_before_relax (fragS * fragp,
19895 			       segT    segtype ATTRIBUTE_UNUSED)
19896 {
19897   fragp->fr_var = 2;
19898   return 2;
19899 }
19900 
19901 /* Convert a machine dependent frag.  */
19902 
19903 void
19904 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
19905 {
19906   unsigned long insn;
19907   unsigned long old_op;
19908   char *buf;
19909   expressionS exp;
19910   fixS *fixp;
19911   int reloc_type;
19912   int pc_rel;
19913   int opcode;
19914 
19915   buf = fragp->fr_literal + fragp->fr_fix;
19916 
19917   old_op = bfd_get_16(abfd, buf);
19918   if (fragp->fr_symbol)
19919     {
19920       exp.X_op = O_symbol;
19921       exp.X_add_symbol = fragp->fr_symbol;
19922     }
19923   else
19924     {
19925       exp.X_op = O_constant;
19926     }
19927   exp.X_add_number = fragp->fr_offset;
19928   opcode = fragp->fr_subtype;
19929   switch (opcode)
19930     {
19931     case T_MNEM_ldr_pc:
19932     case T_MNEM_ldr_pc2:
19933     case T_MNEM_ldr_sp:
19934     case T_MNEM_str_sp:
19935     case T_MNEM_ldr:
19936     case T_MNEM_ldrb:
19937     case T_MNEM_ldrh:
19938     case T_MNEM_str:
19939     case T_MNEM_strb:
19940     case T_MNEM_strh:
19941       if (fragp->fr_var == 4)
19942 	{
19943 	  insn = THUMB_OP32 (opcode);
19944 	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19945 	    {
19946 	      insn |= (old_op & 0x700) << 4;
19947 	    }
19948 	  else
19949 	    {
19950 	      insn |= (old_op & 7) << 12;
19951 	      insn |= (old_op & 0x38) << 13;
19952 	    }
19953 	  insn |= 0x00000c00;
19954 	  put_thumb32_insn (buf, insn);
19955 	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19956 	}
19957       else
19958 	{
19959 	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
19960 	}
19961       pc_rel = (opcode == T_MNEM_ldr_pc2);
19962       break;
19963     case T_MNEM_adr:
19964       if (fragp->fr_var == 4)
19965 	{
19966 	  insn = THUMB_OP32 (opcode);
19967 	  insn |= (old_op & 0xf0) << 4;
19968 	  put_thumb32_insn (buf, insn);
19969 	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19970 	}
19971       else
19972 	{
19973 	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19974 	  exp.X_add_number -= 4;
19975 	}
19976       pc_rel = 1;
19977       break;
19978     case T_MNEM_mov:
19979     case T_MNEM_movs:
19980     case T_MNEM_cmp:
19981     case T_MNEM_cmn:
19982       if (fragp->fr_var == 4)
19983 	{
19984 	  int r0off = (opcode == T_MNEM_mov
19985 		       || opcode == T_MNEM_movs) ? 0 : 8;
19986 	  insn = THUMB_OP32 (opcode);
19987 	  insn = (insn & 0xe1ffffff) | 0x10000000;
19988 	  insn |= (old_op & 0x700) << r0off;
19989 	  put_thumb32_insn (buf, insn);
19990 	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19991 	}
19992       else
19993 	{
19994 	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
19995 	}
19996       pc_rel = 0;
19997       break;
19998     case T_MNEM_b:
19999       if (fragp->fr_var == 4)
20000 	{
20001 	  insn = THUMB_OP32(opcode);
20002 	  put_thumb32_insn (buf, insn);
20003 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
20004 	}
20005       else
20006 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
20007       pc_rel = 1;
20008       break;
20009     case T_MNEM_bcond:
20010       if (fragp->fr_var == 4)
20011 	{
20012 	  insn = THUMB_OP32(opcode);
20013 	  insn |= (old_op & 0xf00) << 14;
20014 	  put_thumb32_insn (buf, insn);
20015 	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
20016 	}
20017       else
20018 	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
20019       pc_rel = 1;
20020       break;
20021     case T_MNEM_add_sp:
20022     case T_MNEM_add_pc:
20023     case T_MNEM_inc_sp:
20024     case T_MNEM_dec_sp:
20025       if (fragp->fr_var == 4)
20026 	{
20027 	  /* ??? Choose between add and addw.  */
20028 	  insn = THUMB_OP32 (opcode);
20029 	  insn |= (old_op & 0xf0) << 4;
20030 	  put_thumb32_insn (buf, insn);
20031 	  if (opcode == T_MNEM_add_pc)
20032 	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
20033 	  else
20034 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20035 	}
20036       else
20037 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20038       pc_rel = 0;
20039       break;
20040 
20041     case T_MNEM_addi:
20042     case T_MNEM_addis:
20043     case T_MNEM_subi:
20044     case T_MNEM_subis:
20045       if (fragp->fr_var == 4)
20046 	{
20047 	  insn = THUMB_OP32 (opcode);
20048 	  insn |= (old_op & 0xf0) << 4;
20049 	  insn |= (old_op & 0xf) << 16;
20050 	  put_thumb32_insn (buf, insn);
20051 	  if (insn & (1 << 20))
20052 	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20053 	  else
20054 	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20055 	}
20056       else
20057 	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20058       pc_rel = 0;
20059       break;
20060     default:
20061       abort ();
20062     }
20063   fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
20064 		      (enum bfd_reloc_code_real) reloc_type);
20065   fixp->fx_file = fragp->fr_file;
20066   fixp->fx_line = fragp->fr_line;
20067   fragp->fr_fix += fragp->fr_var;
20068 }
20069 
20070 /* Return the size of a relaxable immediate operand instruction.
20071    SHIFT and SIZE specify the form of the allowable immediate.  */
20072 static int
20073 relax_immediate (fragS *fragp, int size, int shift)
20074 {
20075   offsetT offset;
20076   offsetT mask;
20077   offsetT low;
20078 
20079   /* ??? Should be able to do better than this.  */
20080   if (fragp->fr_symbol)
20081     return 4;
20082 
20083   low = (1 << shift) - 1;
20084   mask = (1 << (shift + size)) - (1 << shift);
20085   offset = fragp->fr_offset;
20086   /* Force misaligned offsets to 32-bit variant.  */
20087   if (offset & low)
20088     return 4;
20089   if (offset & ~mask)
20090     return 4;
20091   return 2;
20092 }
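
/* Illustrative example: for the Thumb "ldr Rt, [Rn, #imm]" cases the
   caller passes SIZE == 5 and SHIFT == 2, giving MASK == 0x7c and
   LOW == 3.  A word-aligned offset in 0..124 (e.g. 0x7c) keeps the
   2-byte encoding, while a misaligned offset (0x7e), an out-of-range
   one (0x80) or an unresolved symbol forces the 4-byte Thumb-2 form.  */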
20093 
20094 /* Get the address of a symbol during relaxation.  */
20095 static addressT
20096 relaxed_symbol_addr (fragS *fragp, long stretch)
20097 {
20098   fragS *sym_frag;
20099   addressT addr;
20100   symbolS *sym;
20101 
20102   sym = fragp->fr_symbol;
20103   sym_frag = symbol_get_frag (sym);
20104   know (S_GET_SEGMENT (sym) != absolute_section
20105 	|| sym_frag == &zero_address_frag);
20106   addr = S_GET_VALUE (sym) + fragp->fr_offset;
20107 
20108   /* If the frag has yet to be reached on this pass, assume it will
20109      move by STRETCH just as we did.  If this is not so, it will
20110      be because some frag in between grows, and that will force
20111      another pass.  */
20112 
20113   if (stretch != 0
20114       && sym_frag->relax_marker != fragp->relax_marker)
20115     {
20116       fragS *f;
20117 
20118       /* Adjust stretch for any alignment frag.  Note that if we have
20119 	 been expanding the earlier code, the symbol may be
20120 	 defined in what appears to be an earlier frag.  FIXME:
20121 	 This doesn't handle the fr_subtype field, which specifies
20122 	 a maximum number of bytes to skip when doing an
20123 	 alignment.  */
20124       for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
20125 	{
20126 	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
20127 	    {
20128 	      if (stretch < 0)
20129 		stretch = - ((- stretch)
20130 			     & ~ ((1 << (int) f->fr_offset) - 1));
20131 	      else
20132 		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
20133 	      if (stretch == 0)
20134 		break;
20135 	    }
20136 	}
20137       if (f != NULL)
20138 	addr += stretch;
20139     }
20140 
20141   return addr;
20142 }
20143 
20144 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20145    load.  */
20146 static int
20147 relax_adr (fragS *fragp, asection *sec, long stretch)
20148 {
20149   addressT addr;
20150   offsetT val;
20151 
20152   /* Assume worst case for symbols not known to be in the same section.  */
20153   if (fragp->fr_symbol == NULL
20154       || !S_IS_DEFINED (fragp->fr_symbol)
20155       || sec != S_GET_SEGMENT (fragp->fr_symbol)
20156       || S_IS_WEAK (fragp->fr_symbol))
20157     return 4;
20158 
20159   val = relaxed_symbol_addr (fragp, stretch);
20160   addr = fragp->fr_address + fragp->fr_fix;
20161   addr = (addr + 4) & ~3;
20162   /* Force misaligned targets to 32-bit variant.  */
20163   if (val & 3)
20164     return 4;
20165   val -= addr;
20166   if (val < 0 || val > 1020)
20167     return 4;
20168   return 2;
20169 }
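
/* Illustrative example: for an adr/PC-relative load with
   fr_address + fr_fix == 0x100 the aligned base is (0x100 + 4) & ~3
   == 0x104, so a word-aligned target at 0x4f8 (1012 bytes ahead, within
   the 1020-byte reach) keeps the 2-byte form; a misaligned or backward
   target, or one more than 1020 bytes ahead, needs the 4-byte form.  */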
20170 
20171 /* Return the size of a relaxable add/sub immediate instruction.  */
20172 static int
20173 relax_addsub (fragS *fragp, asection *sec)
20174 {
20175   char *buf;
20176   int op;
20177 
20178   buf = fragp->fr_literal + fragp->fr_fix;
20179   op = bfd_get_16(sec->owner, buf);
20180   if ((op & 0xf) == ((op >> 4) & 0xf))
20181     return relax_immediate (fragp, 8, 0);
20182   else
20183     return relax_immediate (fragp, 3, 0);
20184 }
20185 
20186 
20187 /* Return the size of a relaxable branch instruction.  BITS is the
20188    size of the offset field in the narrow instruction.  */
20189 
20190 static int
20191 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20192 {
20193   addressT addr;
20194   offsetT val;
20195   offsetT limit;
20196 
20197   /* Assume worst case for symbols not known to be in the same section.  */
20198   if (!S_IS_DEFINED (fragp->fr_symbol)
20199       || sec != S_GET_SEGMENT (fragp->fr_symbol)
20200       || S_IS_WEAK (fragp->fr_symbol))
20201     return 4;
20202 
20203 #ifdef OBJ_ELF
20204   if (S_IS_DEFINED (fragp->fr_symbol)
20205       && ARM_IS_FUNC (fragp->fr_symbol))
20206       return 4;
20207 
20208   /* PR 12532.  Global symbols with default visibility might
20209      be preempted, so do not relax relocations to them.  */
20210   if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
20211       && (! S_IS_LOCAL (fragp->fr_symbol)))
20212     return 4;
20213 #endif
20214 
20215   val = relaxed_symbol_addr (fragp, stretch);
20216   addr = fragp->fr_address + fragp->fr_fix + 4;
20217   val -= addr;
20218 
20219   /* The offset field is a signed value scaled by 2, hence the limit.  */
20220   limit = 1 << bits;
20221   if (val >= limit || val < -limit)
20222     return 4;
20223   return 2;
20224 }
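
/* Illustrative example: the unconditional Thumb "b" is relaxed with
   BITS == 11, so LIMIT == 2048 and displacements outside [-2048, 2047]
   bytes select the 32-bit B.W form; conditional branches use BITS == 8,
   giving a narrow reach of roughly +/- 256 bytes.  */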
20225 
20226 
20227 /* Relax a machine dependent frag.  This returns the amount by which
20228    the current size of the frag should change.  */
20229 
20230 int
20231 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20232 {
20233   int oldsize;
20234   int newsize;
20235 
20236   oldsize = fragp->fr_var;
20237   switch (fragp->fr_subtype)
20238     {
20239     case T_MNEM_ldr_pc2:
20240       newsize = relax_adr (fragp, sec, stretch);
20241       break;
20242     case T_MNEM_ldr_pc:
20243     case T_MNEM_ldr_sp:
20244     case T_MNEM_str_sp:
20245       newsize = relax_immediate (fragp, 8, 2);
20246       break;
20247     case T_MNEM_ldr:
20248     case T_MNEM_str:
20249       newsize = relax_immediate (fragp, 5, 2);
20250       break;
20251     case T_MNEM_ldrh:
20252     case T_MNEM_strh:
20253       newsize = relax_immediate (fragp, 5, 1);
20254       break;
20255     case T_MNEM_ldrb:
20256     case T_MNEM_strb:
20257       newsize = relax_immediate (fragp, 5, 0);
20258       break;
20259     case T_MNEM_adr:
20260       newsize = relax_adr (fragp, sec, stretch);
20261       break;
20262     case T_MNEM_mov:
20263     case T_MNEM_movs:
20264     case T_MNEM_cmp:
20265     case T_MNEM_cmn:
20266       newsize = relax_immediate (fragp, 8, 0);
20267       break;
20268     case T_MNEM_b:
20269       newsize = relax_branch (fragp, sec, 11, stretch);
20270       break;
20271     case T_MNEM_bcond:
20272       newsize = relax_branch (fragp, sec, 8, stretch);
20273       break;
20274     case T_MNEM_add_sp:
20275     case T_MNEM_add_pc:
20276       newsize = relax_immediate (fragp, 8, 2);
20277       break;
20278     case T_MNEM_inc_sp:
20279     case T_MNEM_dec_sp:
20280       newsize = relax_immediate (fragp, 7, 2);
20281       break;
20282     case T_MNEM_addi:
20283     case T_MNEM_addis:
20284     case T_MNEM_subi:
20285     case T_MNEM_subis:
20286       newsize = relax_addsub (fragp, sec);
20287       break;
20288     default:
20289       abort ();
20290     }
20291 
20292   fragp->fr_var = newsize;
20293   /* Freeze wide instructions that are at or before the same location as
20294      in the previous pass.  This avoids infinite loops.
20295      Don't freeze them unconditionally because targets may be artificially
20296      misaligned by the expansion of preceding frags.  */
20297   if (stretch <= 0 && newsize > 2)
20298     {
20299       md_convert_frag (sec->owner, sec, fragp);
20300       frag_wane (fragp);
20301     }
20302 
20303   return newsize - oldsize;
20304 }
20305 
20306 /* Round up a section size to the appropriate boundary.	 */
20307 
20308 valueT
20309 md_section_align (segT	 segment ATTRIBUTE_UNUSED,
20310 		  valueT size)
20311 {
20312 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20313   if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20314     {
20315       /* For a.out, force the section size to be aligned.  If we don't do
20316 	 this, BFD will align it for us, but it will not write out the
20317 	 final bytes of the section.  This may be a bug in BFD, but it is
20318 	 easier to fix it here since that is how the other a.out targets
20319 	 work.  */
20320       int align;
20321 
20322       align = bfd_get_section_alignment (stdoutput, segment);
20323       size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20324     }
20325 #endif
20326 
20327   return size;
20328 }
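
/* Illustrative example: on an a.out target a section with alignment
   power 2 (4-byte alignment) and a size of 10 bytes is rounded up to 12;
   on ELF targets the size is returned unchanged.  */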
20329 
20330 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
20331    of an rs_align_code fragment.  */
20332 
20333 void
20334 arm_handle_align (fragS * fragP)
20335 {
20336   static char const arm_noop[2][2][4] =
20337     {
20338       {  /* ARMv1 */
20339 	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
20340 	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
20341       },
20342       {  /* ARMv6k */
20343 	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
20344 	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
20345       },
20346     };
20347   static char const thumb_noop[2][2][2] =
20348     {
20349       {  /* Thumb-1 */
20350 	{0xc0, 0x46},  /* LE */
20351 	{0x46, 0xc0},  /* BE */
20352       },
20353       {  /* Thumb-2 */
20354 	{0x00, 0xbf},  /* LE */
20355 	{0xbf, 0x00}   /* BE */
20356       }
20357     };
20358   static char const wide_thumb_noop[2][4] =
20359     {  /* Wide Thumb-2 */
20360       {0xaf, 0xf3, 0x00, 0x80},  /* LE */
20361       {0xf3, 0xaf, 0x80, 0x00},  /* BE */
20362     };
20363 
20364   unsigned bytes, fix, noop_size;
20365   char * p;
20366   const char * noop;
20367   const char *narrow_noop = NULL;
20368 #ifdef OBJ_ELF
20369   enum mstate state;
20370 #endif
20371 
20372   if (fragP->fr_type != rs_align_code)
20373     return;
20374 
20375   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
20376   p = fragP->fr_literal + fragP->fr_fix;
20377   fix = 0;
20378 
20379   if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
20380     bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
20381 
20382   gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
20383 
20384   if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
20385     {
20386       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
20387 	{
20388 	  narrow_noop = thumb_noop[1][target_big_endian];
20389 	  noop = wide_thumb_noop[target_big_endian];
20390 	}
20391       else
20392 	noop = thumb_noop[0][target_big_endian];
20393       noop_size = 2;
20394 #ifdef OBJ_ELF
20395       state = MAP_THUMB;
20396 #endif
20397     }
20398   else
20399     {
20400       noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
20401 		     [target_big_endian];
20402       noop_size = 4;
20403 #ifdef OBJ_ELF
20404       state = MAP_ARM;
20405 #endif
20406     }
20407 
20408   fragP->fr_var = noop_size;
20409 
20410   if (bytes & (noop_size - 1))
20411     {
20412       fix = bytes & (noop_size - 1);
20413 #ifdef OBJ_ELF
20414       insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
20415 #endif
20416       memset (p, 0, fix);
20417       p += fix;
20418       bytes -= fix;
20419     }
20420 
20421   if (narrow_noop)
20422     {
20423       if (bytes & noop_size)
20424 	{
20425 	  /* Insert a narrow noop.  */
20426 	  memcpy (p, narrow_noop, noop_size);
20427 	  p += noop_size;
20428 	  bytes -= noop_size;
20429 	  fix += noop_size;
20430 	}
20431 
20432       /* Use wide noops for the remainder.  */
20433       noop_size = 4;
20434     }
20435 
20436   while (bytes >= noop_size)
20437     {
20438       memcpy (p, noop, noop_size);
20439       p += noop_size;
20440       bytes -= noop_size;
20441       fix += noop_size;
20442     }
20443 
20444   fragP->fr_fix += fix;
20445 }
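
/* Illustrative example: an ARM-state alignment needing 6 bytes of
   padding emits 2 zero bytes (covered by a data mapping symbol on ELF)
   followed by one 4-byte NOP; in Thumb-2 state the same 6 bytes become
   one 2-byte "nop" followed by one 4-byte "nop.w".  */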
20446 
20447 /* Called from md_do_align.  Used to create an alignment
20448    frag in a code section.  */
20449 
20450 void
20451 arm_frag_align_code (int n, int max)
20452 {
20453   char * p;
20454 
20455   /* We assume that there will never be a requirement
20456      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
20457   if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20458     {
20459       char err_msg[128];
20460 
20461       sprintf (err_msg,
20462         _("alignments greater than %d bytes not supported in .text sections."),
20463         MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20464       as_fatal ("%s", err_msg);
20465     }
20466 
20467   p = frag_var (rs_align_code,
20468 		MAX_MEM_FOR_RS_ALIGN_CODE,
20469 		1,
20470 		(relax_substateT) max,
20471 		(symbolS *) NULL,
20472 		(offsetT) n,
20473 		(char *) NULL);
20474   *p = 0;
20475 }
20476 
20477 /* Perform target specific initialisation of a frag.
20478    Note - despite the name this initialisation is not done when the frag
20479    is created, but only when its type is assigned.  A frag can be created
20480    and used a long time before its type is set, so beware of assuming that
20481    this initialisation is performed first.  */
20482 
20483 #ifndef OBJ_ELF
20484 void
20485 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
20486 {
20487   /* Record whether this frag is in an ARM or a THUMB area.  */
20488   fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20489 }
20490 
20491 #else /* OBJ_ELF is defined.  */
20492 void
20493 arm_init_frag (fragS * fragP, int max_chars)
20494 {
20495   /* If the current ARM vs THUMB mode has not already
20496      been recorded into this frag then do so now.  */
20497   if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
20498     {
20499       fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20500 
20501       /* Record a mapping symbol for alignment frags.  We will delete this
20502 	 later if the alignment ends up empty.  */
20503       switch (fragP->fr_type)
20504 	{
20505 	  case rs_align:
20506 	  case rs_align_test:
20507 	  case rs_fill:
20508 	    mapping_state_2 (MAP_DATA, max_chars);
20509 	    break;
20510 	  case rs_align_code:
20511 	    mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
20512 	    break;
20513 	  default:
20514 	    break;
20515 	}
20516     }
20517 }
20518 
20519 /* When we change sections we need to issue a new mapping symbol.  */
20520 
20521 void
20522 arm_elf_change_section (void)
20523 {
20524   /* Link an unlinked unwind index table section to the .text section.	*/
20525   if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20526       && elf_linked_to_section (now_seg) == NULL)
20527     elf_linked_to_section (now_seg) = text_section;
20528 }
20529 
20530 int
20531 arm_elf_section_type (const char * str, size_t len)
20532 {
20533   if (len == 5 && strncmp (str, "exidx", 5) == 0)
20534     return SHT_ARM_EXIDX;
20535 
20536   return -1;
20537 }
20538 
20539 /* Code to deal with unwinding tables.	*/
20540 
20541 static void add_unwind_adjustsp (offsetT);
20542 
20543 /* Generate any deferred unwind frame offset.  */
20544 
20545 static void
20546 flush_pending_unwind (void)
20547 {
20548   offsetT offset;
20549 
20550   offset = unwind.pending_offset;
20551   unwind.pending_offset = 0;
20552   if (offset != 0)
20553     add_unwind_adjustsp (offset);
20554 }
20555 
20556 /* Add an opcode to this list for this function.  Two-byte opcodes should
20557    be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
20558    order.  */
20559 
20560 static void
20561 add_unwind_opcode (valueT op, int length)
20562 {
20563   /* Add any deferred stack adjustment.	 */
20564   if (unwind.pending_offset)
20565     flush_pending_unwind ();
20566 
20567   unwind.sp_restored = 0;
20568 
20569   if (unwind.opcode_count + length > unwind.opcode_alloc)
20570     {
20571       unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
20572       if (unwind.opcodes)
20573 	unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
20574                                                      unwind.opcode_alloc);
20575       else
20576 	unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
20577     }
20578   while (length > 0)
20579     {
20580       length--;
20581       unwind.opcodes[unwind.opcode_count] = op & 0xff;
20582       op >>= 8;
20583       unwind.opcode_count++;
20584     }
20585 }
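
/* Illustrative example (using the ARM EHABI byte encodings): the
   two-byte opcode 0xb1 0x0f ("pop {r0-r3}") is passed here as
   add_unwind_opcode (0xb10f, 2); the low byte is stored first, and the
   word-packing in create_unwind_entry walks the list backwards, so the
   final stream still reads 0xb1 0x0f.  */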
20586 
20587 /* Add unwind opcodes to adjust the stack pointer.  */
20588 
20589 static void
20590 add_unwind_adjustsp (offsetT offset)
20591 {
20592   valueT op;
20593 
20594   if (offset > 0x200)
20595     {
20596       /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
20597       char bytes[5];
20598       int n;
20599       valueT o;
20600 
20601       /* Long form: 0xb2, uleb128.  */
20602       /* This might not fit in a word so add the individual bytes,
20603 	 remembering the list is built in reverse order.  */
20604       o = (valueT) ((offset - 0x204) >> 2);
20605       if (o == 0)
20606 	add_unwind_opcode (0, 1);
20607 
20608       /* Calculate the uleb128 encoding of the offset.	*/
20609       n = 0;
20610       while (o)
20611 	{
20612 	  bytes[n] = o & 0x7f;
20613 	  o >>= 7;
20614 	  if (o)
20615 	    bytes[n] |= 0x80;
20616 	  n++;
20617 	}
20618       /* Add the insn.	*/
20619       for (; n; n--)
20620 	add_unwind_opcode (bytes[n - 1], 1);
20621       add_unwind_opcode (0xb2, 1);
20622     }
20623   else if (offset > 0x100)
20624     {
20625       /* Two short opcodes.  */
20626       add_unwind_opcode (0x3f, 1);
20627       op = (offset - 0x104) >> 2;
20628       add_unwind_opcode (op, 1);
20629     }
20630   else if (offset > 0)
20631     {
20632       /* Short opcode.	*/
20633       op = (offset - 4) >> 2;
20634       add_unwind_opcode (op, 1);
20635     }
20636   else if (offset < 0)
20637     {
20638       offset = -offset;
20639       while (offset > 0x100)
20640 	{
20641 	  add_unwind_opcode (0x7f, 1);
20642 	  offset -= 0x100;
20643 	}
20644       op = ((offset - 4) >> 2) | 0x40;
20645       add_unwind_opcode (op, 1);
20646     }
20647 }
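
/* Illustrative examples of the positive-offset cases above:
     vsp += 16  -> one short opcode, (16 - 4) >> 2 == 0x03;
     vsp += 264 -> 0x3f ("vsp += 256") plus a short opcode 0x01;
     vsp += 524 -> the long form 0xb2 followed by the uleb128 encoding
		   of (524 - 0x204) >> 2 == 2, i.e. vsp += 0x204 + (2 << 2).  */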
20648 
20649 /* Finish the list of unwind opcodes for this function.	 */
20650 static void
20651 finish_unwind_opcodes (void)
20652 {
20653   valueT op;
20654 
20655   if (unwind.fp_used)
20656     {
20657       /* Adjust sp as necessary.  */
20658       unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
20659       flush_pending_unwind ();
20660 
20661       /* After restoring sp from the frame pointer.  */
20662       op = 0x90 | unwind.fp_reg;
20663       add_unwind_opcode (op, 1);
20664     }
20665   else
20666     flush_pending_unwind ();
20667 }
20668 
20669 
20670 /* Start an exception table entry.  If idx is nonzero this is an index table
20671    entry.  */
20672 
20673 static void
20674 start_unwind_section (const segT text_seg, int idx)
20675 {
20676   const char * text_name;
20677   const char * prefix;
20678   const char * prefix_once;
20679   const char * group_name;
20680   size_t prefix_len;
20681   size_t text_len;
20682   char * sec_name;
20683   size_t sec_name_len;
20684   int type;
20685   int flags;
20686   int linkonce;
20687 
20688   if (idx)
20689     {
20690       prefix = ELF_STRING_ARM_unwind;
20691       prefix_once = ELF_STRING_ARM_unwind_once;
20692       type = SHT_ARM_EXIDX;
20693     }
20694   else
20695     {
20696       prefix = ELF_STRING_ARM_unwind_info;
20697       prefix_once = ELF_STRING_ARM_unwind_info_once;
20698       type = SHT_PROGBITS;
20699     }
20700 
20701   text_name = segment_name (text_seg);
20702   if (streq (text_name, ".text"))
20703     text_name = "";
20704 
20705   if (strncmp (text_name, ".gnu.linkonce.t.",
20706 	       strlen (".gnu.linkonce.t.")) == 0)
20707     {
20708       prefix = prefix_once;
20709       text_name += strlen (".gnu.linkonce.t.");
20710     }
20711 
20712   prefix_len = strlen (prefix);
20713   text_len = strlen (text_name);
20714   sec_name_len = prefix_len + text_len;
20715   sec_name = (char *) xmalloc (sec_name_len + 1);
20716   memcpy (sec_name, prefix, prefix_len);
20717   memcpy (sec_name + prefix_len, text_name, text_len);
20718   sec_name[prefix_len + text_len] = '\0';
20719 
20720   flags = SHF_ALLOC;
20721   linkonce = 0;
20722   group_name = 0;
20723 
20724   /* Handle COMDAT group.  */
20725   if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
20726     {
20727       group_name = elf_group_name (text_seg);
20728       if (group_name == NULL)
20729 	{
20730 	  as_bad (_("Group section `%s' has no group signature"),
20731 		  segment_name (text_seg));
20732 	  ignore_rest_of_line ();
20733 	  return;
20734 	}
20735       flags |= SHF_GROUP;
20736       linkonce = 1;
20737     }
20738 
20739   obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
20740 
20741   /* Set the section link for index tables.  */
20742   if (idx)
20743     elf_linked_to_section (now_seg) = text_seg;
20744 }
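
/* Illustrative example (assuming the usual ELF_STRING_ARM_* definitions
   ".ARM.exidx" and ".ARM.extab"): code in ".text" gets its index table
   in ".ARM.exidx" and its unwind data in ".ARM.extab", while code in
   ".text.foo" gets ".ARM.exidx.text.foo" and ".ARM.extab.text.foo", with
   the index table linked back to the originating text section.  */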
20745 
20746 
20747 /* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
20748    personality routine data.  Returns zero, or the index table value for
20749    an inline entry.  */
20750 
20751 static valueT
20752 create_unwind_entry (int have_data)
20753 {
20754   int size;
20755   addressT where;
20756   char *ptr;
20757   /* The current word of data.	*/
20758   valueT data;
20759   /* The number of bytes left in this word.  */
20760   int n;
20761 
20762   finish_unwind_opcodes ();
20763 
20764   /* Remember the current text section.	 */
20765   unwind.saved_seg = now_seg;
20766   unwind.saved_subseg = now_subseg;
20767 
20768   start_unwind_section (now_seg, 0);
20769 
20770   if (unwind.personality_routine == NULL)
20771     {
20772       if (unwind.personality_index == -2)
20773 	{
20774 	  if (have_data)
20775 	    as_bad (_("handlerdata in cantunwind frame"));
20776 	  return 1; /* EXIDX_CANTUNWIND.  */
20777 	}
20778 
20779       /* Use a default personality routine if none is specified.  */
20780       if (unwind.personality_index == -1)
20781 	{
20782 	  if (unwind.opcode_count > 3)
20783 	    unwind.personality_index = 1;
20784 	  else
20785 	    unwind.personality_index = 0;
20786 	}
20787 
20788       /* Space for the personality routine entry.  */
20789       if (unwind.personality_index == 0)
20790 	{
20791 	  if (unwind.opcode_count > 3)
20792 	    as_bad (_("too many unwind opcodes for personality routine 0"));
20793 
20794 	  if (!have_data)
20795 	    {
20796 	      /* All the data is inline in the index table.  */
20797 	      data = 0x80;
20798 	      n = 3;
20799 	      while (unwind.opcode_count > 0)
20800 		{
20801 		  unwind.opcode_count--;
20802 		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20803 		  n--;
20804 		}
20805 
20806 	      /* Pad with "finish" opcodes.  */
20807 	      while (n--)
20808 		data = (data << 8) | 0xb0;
20809 
20810 	      return data;
20811 	    }
20812 	  size = 0;
20813 	}
20814       else
20815 	/* We get two opcodes "free" in the first word.	 */
20816 	size = unwind.opcode_count - 2;
20817     }
20818   else
20819     {
20820       gas_assert (unwind.personality_index == -1);
20821 
20822       /* An extra byte is required for the opcode count.	*/
20823       size = unwind.opcode_count + 1;
20824     }
20825 
20826   size = (size + 3) >> 2;
20827   if (size > 0xff)
20828     as_bad (_("too many unwind opcodes"));
20829 
20830   frag_align (2, 0, 0);
20831   record_alignment (now_seg, 2);
20832   unwind.table_entry = expr_build_dot ();
20833 
20834   /* Allocate the table entry.	*/
20835   ptr = frag_more ((size << 2) + 4);
20836   /* PR 13449: Zero the table entries in case some of them are not used.  */
20837   memset (ptr, 0, (size << 2) + 4);
20838   where = frag_now_fix () - ((size << 2) + 4);
20839 
20840   switch (unwind.personality_index)
20841     {
20842     case -1:
20843       /* ??? Should this be a PLT generating relocation?  */
20844       /* Custom personality routine.  */
20845       fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
20846 	       BFD_RELOC_ARM_PREL31);
20847 
20848       where += 4;
20849       ptr += 4;
20850 
20851       /* Set the first byte to the number of additional words.	*/
20852       data = size > 0 ? size - 1 : 0;
20853       n = 3;
20854       break;
20855 
20856     /* ABI defined personality routines.  */
20857     case 0:
20858       /* Three opcode bytes are packed into the first word.  */
20859       data = 0x80;
20860       n = 3;
20861       break;
20862 
20863     case 1:
20864     case 2:
20865       /* The size and first two opcode bytes go in the first word.  */
20866       data = ((0x80 + unwind.personality_index) << 8) | size;
20867       n = 2;
20868       break;
20869 
20870     default:
20871       /* Should never happen.  */
20872       abort ();
20873     }
20874 
20875   /* Pack the opcodes into words (MSB first), reversing the list at the same
20876      time.  */
20877   while (unwind.opcode_count > 0)
20878     {
20879       if (n == 0)
20880 	{
20881 	  md_number_to_chars (ptr, data, 4);
20882 	  ptr += 4;
20883 	  n = 4;
20884 	  data = 0;
20885 	}
20886       unwind.opcode_count--;
20887       n--;
20888       data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20889     }
20890 
20891   /* Finish off the last word.	*/
20892   if (n < 4)
20893     {
20894       /* Pad with "finish" opcodes.  */
20895       while (n--)
20896 	data = (data << 8) | 0xb0;
20897 
20898       md_number_to_chars (ptr, data, 4);
20899     }
20900 
20901   if (!have_data)
20902     {
20903       /* Add an empty descriptor if there is no user-specified data.   */
20904       ptr = frag_more (4);
20905       md_number_to_chars (ptr, 0, 4);
20906     }
20907 
20908   return 0;
20909 }
20910 
20911 
20912 /* Initialize the DWARF-2 unwind information for this procedure.  */
20913 
20914 void
20915 tc_arm_frame_initial_instructions (void)
20916 {
20917   cfi_add_CFA_def_cfa (REG_SP, 0);
20918 }
20919 #endif /* OBJ_ELF */
20920 
20921 /* Convert REGNAME to a DWARF-2 register number.  */
20922 
20923 int
20924 tc_arm_regname_to_dw2regnum (char *regname)
20925 {
20926   int reg = arm_reg_parse (&regname, REG_TYPE_RN);
20927 
20928   if (reg == FAIL)
20929     return -1;
20930 
20931   return reg;
20932 }
20933 
20934 #ifdef TE_PE
20935 void
20936 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20937 {
20938   expressionS exp;
20939 
20940   exp.X_op = O_secrel;
20941   exp.X_add_symbol = symbol;
20942   exp.X_add_number = 0;
20943   emit_expr (&exp, size);
20944 }
20945 #endif
20946 
20947 /* MD interface: Symbol and relocation handling.  */
20948 
20949 /* Return the address within the segment that a PC-relative fixup is
20950    relative to.  For ARM, PC-relative fixups applied to instructions
20951    are generally relative to the location of the fixup plus 8 bytes.
20952    Thumb branches are offset by 4, and Thumb loads relative to PC
20953    require special handling.  */
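/* Illustrative example (assuming the fixup resolves locally, so BASE is
   not zeroed): an ARM-state branch fixup at address 0x1000 yields 0x1008,
   a Thumb branch at the same address yields 0x1004, and a Thumb
   PC-relative load (BFD_RELOC_ARM_THUMB_OFFSET) at 0x1002 yields
   (0x1002 + 4) & ~3 == 0x1004, reflecting the forced word alignment of
   the Thumb PC.  */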
20954 
20955 long
20956 md_pcrel_from_section (fixS * fixP, segT seg)
20957 {
20958   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20959 
20960   /* If this is pc-relative and we are going to emit a relocation
20961      then we just want to put out any pipeline compensation that the linker
20962      will need.  Otherwise we want to use the calculated base.
20963      For WinCE we skip the bias for externals as well, since this
20964      is how the MS ARM-CE assembler behaves and we want to be compatible.  */
20965   if (fixP->fx_pcrel
20966       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20967 	  || (arm_force_relocation (fixP)
20968 #ifdef TE_WINCE
20969 	      && !S_IS_EXTERNAL (fixP->fx_addsy)
20970 #endif
20971 	      )))
20972     base = 0;
20973 
20974 
20975   switch (fixP->fx_r_type)
20976     {
20977       /* PC relative addressing on the Thumb is slightly odd as the
20978 	 bottom two bits of the PC are forced to zero for the
20979 	 calculation.  This happens *after* application of the
20980 	 pipeline offset.  However, Thumb adrl already adjusts for
20981 	 this, so we need not do it again.  */
20982     case BFD_RELOC_ARM_THUMB_ADD:
20983       return base & ~3;
20984 
20985     case BFD_RELOC_ARM_THUMB_OFFSET:
20986     case BFD_RELOC_ARM_T32_OFFSET_IMM:
20987     case BFD_RELOC_ARM_T32_ADD_PC12:
20988     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20989       return (base + 4) & ~3;
20990 
20991       /* Thumb branches are simply offset by +4.  */
20992     case BFD_RELOC_THUMB_PCREL_BRANCH7:
20993     case BFD_RELOC_THUMB_PCREL_BRANCH9:
20994     case BFD_RELOC_THUMB_PCREL_BRANCH12:
20995     case BFD_RELOC_THUMB_PCREL_BRANCH20:
20996     case BFD_RELOC_THUMB_PCREL_BRANCH25:
20997       return base + 4;
20998 
20999     case BFD_RELOC_THUMB_PCREL_BRANCH23:
21000       if (fixP->fx_addsy
21001 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21002 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21003 	  && ARM_IS_FUNC (fixP->fx_addsy)
21004  	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21005  	base = fixP->fx_where + fixP->fx_frag->fr_address;
21006        return base + 4;
21007 
21008       /* BLX is like branches above, but forces the low two bits of PC to
21009 	 zero.  */
21010     case BFD_RELOC_THUMB_PCREL_BLX:
21011       if (fixP->fx_addsy
21012 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21013 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21014  	  && THUMB_IS_FUNC (fixP->fx_addsy)
21015  	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21016  	base = fixP->fx_where + fixP->fx_frag->fr_address;
21017       return (base + 4) & ~3;
21018 
21019       /* ARM mode branches are offset by +8.  However, the Windows CE
21020 	 loader expects the relocation not to take this into account.  */
21021     case BFD_RELOC_ARM_PCREL_BLX:
21022       if (fixP->fx_addsy
21023 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21024 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21025  	  && ARM_IS_FUNC (fixP->fx_addsy)
21026  	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21027  	base = fixP->fx_where + fixP->fx_frag->fr_address;
21028       return base + 8;
21029 
21030     case BFD_RELOC_ARM_PCREL_CALL:
21031       if (fixP->fx_addsy
21032 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21033 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21034  	  && THUMB_IS_FUNC (fixP->fx_addsy)
21035  	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21036  	base = fixP->fx_where + fixP->fx_frag->fr_address;
21037       return base + 8;
21038 
21039     case BFD_RELOC_ARM_PCREL_BRANCH:
21040     case BFD_RELOC_ARM_PCREL_JUMP:
21041     case BFD_RELOC_ARM_PLT32:
21042 #ifdef TE_WINCE
21043       /* When handling fixups immediately (because we have already
21044          discovered the value of the symbol, or the address of the frag
21045          involved) we must account for the +8 offset ourselves, as the OS
21046          loader will never see the reloc; see fixup_segment() in write.c.
21047          The S_IS_EXTERNAL test handles global symbols: those need the
21048          calculated base, not just the pipeline compensation the linker applies.  */
21049       if (fixP->fx_pcrel
21050 	  && fixP->fx_addsy != NULL
21051 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21052 	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21053 	return base + 8;
21054       return base;
21055 #else
21056       return base + 8;
21057 #endif
21058 
21059 
21060       /* ARM mode loads relative to PC are also offset by +8.  Unlike
21061 	 branches, the Windows CE loader *does* expect the relocation
21062 	 to take this into account.  */
21063     case BFD_RELOC_ARM_OFFSET_IMM:
21064     case BFD_RELOC_ARM_OFFSET_IMM8:
21065     case BFD_RELOC_ARM_HWLITERAL:
21066     case BFD_RELOC_ARM_LITERAL:
21067     case BFD_RELOC_ARM_CP_OFF_IMM:
21068       return base + 8;
21069 
21070 
21071       /* Other PC-relative relocations are un-offset.  */
21072     default:
21073       return base;
21074     }
21075 }
21076 
21077 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
21078    Otherwise we have no need to default the values of symbols.  */
21079 
21080 symbolS *
21081 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21082 {
21083 #ifdef OBJ_ELF
21084   if (name[0] == '_' && name[1] == 'G'
21085       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21086     {
21087       if (!GOT_symbol)
21088 	{
21089 	  if (symbol_find (name))
21090 	    as_bad (_("GOT already in the symbol table"));
21091 
21092 	  GOT_symbol = symbol_new (name, undefined_section,
21093 				   (valueT) 0, & zero_address_frag);
21094 	}
21095 
21096       return GOT_symbol;
21097     }
21098 #endif
21099 
21100   return NULL;
21101 }
21102 
21103 /* Subroutine of md_apply_fix.	 Check to see if an immediate can be
21104    computed as two separate immediate values, added together.  We
21105    already know that this value cannot be computed by just one ARM
21106    instruction.	 */
21107 
21108 static unsigned int
21109 validate_immediate_twopart (unsigned int   val,
21110 			    unsigned int * highpart)
21111 {
21112   unsigned int a;
21113   unsigned int i;
21114 
21115   for (i = 0; i < 32; i += 2)
21116     if (((a = rotate_left (val, i)) & 0xff) != 0)
21117       {
21118 	if (a & 0xff00)
21119 	  {
21120 	    if (a & ~ 0xffff)
21121 	      continue;
21122 	    * highpart = (a  >> 8) | ((i + 24) << 7);
21123 	  }
21124 	else if (a & 0xff0000)
21125 	  {
21126 	    if (a & 0xff000000)
21127 	      continue;
21128 	    * highpart = (a >> 16) | ((i + 16) << 7);
21129 	  }
21130 	else
21131 	  {
21132 	    gas_assert (a & 0xff000000);
21133 	    * highpart = (a >> 24) | ((i + 8) << 7);
21134 	  }
21135 
21136 	return (a & 0xff) | (i << 7);
21137       }
21138 
21139   return FAIL;
21140 }
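
/* Illustrative example: 0x10004 is not a valid single ARM immediate, but
   it is the sum of two that are.  The loop above returns the encoding of
   the low part, #4 (rotation 0), and stores the encoding of #0x10000
   (the value 1 rotated right by 16) in *HIGHPART, so the caller can
   synthesise the constant with two data-processing instructions.  */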
21141 
21142 static int
21143 validate_offset_imm (unsigned int val, int hwse)
21144 {
21145   if ((hwse && val > 255) || val > 4095)
21146     return FAIL;
21147   return val;
21148 }
21149 
21150 /* Subroutine of md_apply_fix.	 Do those data_ops which can take a
21151    negative immediate constant by altering the instruction.  A bit of
21152    a hack really.
21153 	MOV <-> MVN
21154 	AND <-> BIC
21155 	ADC <-> SBC
21156 	by inverting the second operand, and
21157 	ADD <-> SUB
21158 	CMP <-> CMN
21159 	by negating the second operand.	 */
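/* Illustrative example: an "add r0, r1, #-12" whose constant has no
   valid immediate encoding can become "sub r0, r1, #12", and
   "cmp r0, #-1" becomes "cmn r0, #1"; the MOV/MVN, AND/BIC and ADC/SBC
   pairs are handled by inverting rather than negating the constant.  */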
21160 
21161 static int
21162 negate_data_op (unsigned long * instruction,
21163 		unsigned long	value)
21164 {
21165   int op, new_inst;
21166   unsigned long negated, inverted;
21167 
21168   negated = encode_arm_immediate (-value);
21169   inverted = encode_arm_immediate (~value);
21170 
21171   op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21172   switch (op)
21173     {
21174       /* First negates.	 */
21175     case OPCODE_SUB:		 /* ADD <-> SUB	 */
21176       new_inst = OPCODE_ADD;
21177       value = negated;
21178       break;
21179 
21180     case OPCODE_ADD:
21181       new_inst = OPCODE_SUB;
21182       value = negated;
21183       break;
21184 
21185     case OPCODE_CMP:		 /* CMP <-> CMN	 */
21186       new_inst = OPCODE_CMN;
21187       value = negated;
21188       break;
21189 
21190     case OPCODE_CMN:
21191       new_inst = OPCODE_CMP;
21192       value = negated;
21193       break;
21194 
21195       /* Now Inverted ops.  */
21196     case OPCODE_MOV:		 /* MOV <-> MVN	 */
21197       new_inst = OPCODE_MVN;
21198       value = inverted;
21199       break;
21200 
21201     case OPCODE_MVN:
21202       new_inst = OPCODE_MOV;
21203       value = inverted;
21204       break;
21205 
21206     case OPCODE_AND:		 /* AND <-> BIC	 */
21207       new_inst = OPCODE_BIC;
21208       value = inverted;
21209       break;
21210 
21211     case OPCODE_BIC:
21212       new_inst = OPCODE_AND;
21213       value = inverted;
21214       break;
21215 
21216     case OPCODE_ADC:		  /* ADC <-> SBC  */
21217       new_inst = OPCODE_SBC;
21218       value = inverted;
21219       break;
21220 
21221     case OPCODE_SBC:
21222       new_inst = OPCODE_ADC;
21223       value = inverted;
21224       break;
21225 
21226       /* We cannot do anything.	 */
21227     default:
21228       return FAIL;
21229     }
21230 
21231   if (value == (unsigned) FAIL)
21232     return FAIL;
21233 
21234   *instruction &= OPCODE_MASK;
21235   *instruction |= new_inst << DATA_OP_SHIFT;
21236   return value;
21237 }
21238 
21239 /* Like negate_data_op, but for Thumb-2.   */
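/* Note (informal): in Thumb-2 the MOV/MVN immediate forms are the ORR/ORN
   encodings with Rn == 0xf, so flipping ORR <-> ORN below also covers
   MOV <-> MVN.  Likewise AND with Rd == 15 is really TST, which has no
   BIC counterpart, hence the explicit FAIL for that case.  */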
21240 
21241 static unsigned int
21242 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21243 {
21244   int op, new_inst;
21245   int rd;
21246   unsigned int negated, inverted;
21247 
21248   negated = encode_thumb32_immediate (-value);
21249   inverted = encode_thumb32_immediate (~value);
21250 
21251   rd = (*instruction >> 8) & 0xf;
21252   op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21253   switch (op)
21254     {
21255       /* ADD <-> SUB.  Includes CMP <-> CMN.  */
21256     case T2_OPCODE_SUB:
21257       new_inst = T2_OPCODE_ADD;
21258       value = negated;
21259       break;
21260 
21261     case T2_OPCODE_ADD:
21262       new_inst = T2_OPCODE_SUB;
21263       value = negated;
21264       break;
21265 
21266       /* ORR <-> ORN.  Includes MOV <-> MVN.  */
21267     case T2_OPCODE_ORR:
21268       new_inst = T2_OPCODE_ORN;
21269       value = inverted;
21270       break;
21271 
21272     case T2_OPCODE_ORN:
21273       new_inst = T2_OPCODE_ORR;
21274       value = inverted;
21275       break;
21276 
21277       /* AND <-> BIC.  TST has no inverted equivalent.  */
21278     case T2_OPCODE_AND:
21279       new_inst = T2_OPCODE_BIC;
21280       if (rd == 15)
21281 	value = FAIL;
21282       else
21283 	value = inverted;
21284       break;
21285 
21286     case T2_OPCODE_BIC:
21287       new_inst = T2_OPCODE_AND;
21288       value = inverted;
21289       break;
21290 
21291       /* ADC <-> SBC  */
21292     case T2_OPCODE_ADC:
21293       new_inst = T2_OPCODE_SBC;
21294       value = inverted;
21295       break;
21296 
21297     case T2_OPCODE_SBC:
21298       new_inst = T2_OPCODE_ADC;
21299       value = inverted;
21300       break;
21301 
21302       /* We cannot do anything.	 */
21303     default:
21304       return FAIL;
21305     }
21306 
21307   if (value == (unsigned int)FAIL)
21308     return FAIL;
21309 
21310   *instruction &= T2_OPCODE_MASK;
21311   *instruction |= new_inst << T2_DATA_OP_SHIFT;
21312   return value;
21313 }
21314 
21315 /* Read a 32-bit thumb instruction from buf.  */
21316 static unsigned long
21317 get_thumb32_insn (char * buf)
21318 {
21319   unsigned long insn;
21320   insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21321   insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21322 
21323   return insn;
21324 }
21325 
21326 
21327 /* We usually want to set the low bit on the address of thumb function
21328    symbols.  In particular .word foo - . should have the low bit set.
21329    Generic code tries to fold the difference of two symbols to
21330    a constant.  Prevent this and force a relocation when the first symbol
21331    is a Thumb function.  */
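/* For example (illustrative):

	.thumb_func
   foo:	bx	lr
	...
	.word	foo - .

   folding the subtraction in the assembler would lose the Thumb bit,
   so a relocation is emitted instead.  */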
21332 
21333 bfd_boolean
21334 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21335 {
21336   if (op == O_subtract
21337       && l->X_op == O_symbol
21338       && r->X_op == O_symbol
21339       && THUMB_IS_FUNC (l->X_add_symbol))
21340     {
21341       l->X_op = O_subtract;
21342       l->X_op_symbol = r->X_add_symbol;
21343       l->X_add_number -= r->X_add_number;
21344       return TRUE;
21345     }
21346 
21347   /* Process as normal.  */
21348   return FALSE;
21349 }
21350 
21351 /* Encode Thumb2 unconditional branches and calls.  The encoding
21352    of the immediate fields is identical for the two.  */
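/* For example (illustrative; the value passed here is the final
   displacement to be encoded): an offset of 0x1000 gives S = 0,
   I1 = I2 = 0 and therefore J1 = J2 = 1, imm10 = 1, imm11 = 0,
   i.e. the halfwords 0xf001/0xf800 for a BL.  */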
21353 
21354 static void
21355 encode_thumb2_b_bl_offset (char * buf, offsetT value)
21356 {
21357 #define T2I1I2MASK  ((1 << 13) | (1 << 11))
21358   offsetT newval;
21359   offsetT newval2;
21360   addressT S, I1, I2, lo, hi;
21361 
21362   S = (value >> 24) & 0x01;
21363   I1 = (value >> 23) & 0x01;
21364   I2 = (value >> 22) & 0x01;
21365   hi = (value >> 12) & 0x3ff;
21366   lo = (value >> 1) & 0x7ff;
21367   newval   = md_chars_to_number (buf, THUMB_SIZE);
21368   newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21369   newval  |= (S << 10) | hi;
21370   newval2 &=  ~T2I1I2MASK;
21371   newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
21372   md_number_to_chars (buf, newval, THUMB_SIZE);
21373   md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21374 }
21375 
21376 void
21377 md_apply_fix (fixS *	fixP,
21378 	       valueT * valP,
21379 	       segT	seg)
21380 {
21381   offsetT	 value = * valP;
21382   offsetT	 newval;
21383   unsigned int	 newimm;
21384   unsigned long	 temp;
21385   int		 sign;
21386   char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21387 
21388   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21389 
21390   /* Note whether this will delete the relocation.  */
21391 
21392   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21393     fixP->fx_done = 1;
21394 
21395   /* On a 64-bit host, silently truncate 'value' to 32 bits for
21396      consistency with the behaviour on 32-bit hosts.  Remember value
21397      for emit_reloc.  */
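  /* (The mask/xor/subtract sequence below sign-extends bit 31, so that,
     e.g., 0xfffffffc is treated as -4 even when offsetT is wider than
     32 bits.)  */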
21398   value &= 0xffffffff;
21399   value ^= 0x80000000;
21400   value -= 0x80000000;
21401 
21402   *valP = value;
21403   fixP->fx_addnumber = value;
21404 
21405   /* Same treatment for fixP->fx_offset.  */
21406   fixP->fx_offset &= 0xffffffff;
21407   fixP->fx_offset ^= 0x80000000;
21408   fixP->fx_offset -= 0x80000000;
21409 
21410   switch (fixP->fx_r_type)
21411     {
21412     case BFD_RELOC_NONE:
21413       /* This will need to go in the object file.  */
21414       fixP->fx_done = 0;
21415       break;
21416 
21417     case BFD_RELOC_ARM_IMMEDIATE:
21418       /* We claim that this fixup has been processed here,
21419 	 even if in fact we generate an error because we do
21420 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
21421       fixP->fx_done = 1;
21422 
21423       if (fixP->fx_addsy)
21424 	{
21425 	  const char *msg = 0;
21426 
21427 	  if (! S_IS_DEFINED (fixP->fx_addsy))
21428 	    msg = _("undefined symbol %s used as an immediate value");
21429 	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21430 	    msg = _("symbol %s is in a different section");
21431 	  else if (S_IS_WEAK (fixP->fx_addsy))
21432 	    msg = _("symbol %s is weak and may be overridden later");
21433 
21434 	  if (msg)
21435 	    {
21436 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21437 			    msg, S_GET_NAME (fixP->fx_addsy));
21438 	      break;
21439 	    }
21440 	}
21441 
21442       temp = md_chars_to_number (buf, INSN_SIZE);
21443 
21444       /* If the offset is negative, we should use encoding A2 for ADR.  */
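      /* (0x28f0000 matches "ADD Rd, PC, #imm", i.e. the ADR alias;
	 negate_data_op then rewrites it as the SUB form, encoding A2,
	 so that, for example, an ADR to a label behind the PC still
	 assembles.)  */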
21445       if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21446 	newimm = negate_data_op (&temp, value);
21447       else
21448 	{
21449 	  newimm = encode_arm_immediate (value);
21450 
21451 	  /* If the instruction will fail, see if we can fix things up by
21452 	     changing the opcode.  */
21453 	  if (newimm == (unsigned int) FAIL)
21454 	    newimm = negate_data_op (&temp, value);
21455 	}
21456 
21457       if (newimm == (unsigned int) FAIL)
21458 	{
21459 	  as_bad_where (fixP->fx_file, fixP->fx_line,
21460 			_("invalid constant (%lx) after fixup"),
21461 			(unsigned long) value);
21462 	  break;
21463 	}
21464 
21465       newimm |= (temp & 0xfffff000);
21466       md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21467       break;
21468 
21469     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21470       {
21471 	unsigned int highpart = 0;
21472 	unsigned int newinsn  = 0xe1a00000; /* nop.  */
21473 
21474 	if (fixP->fx_addsy)
21475 	  {
21476 	    const char *msg = 0;
21477 
21478 	    if (! S_IS_DEFINED (fixP->fx_addsy))
21479 	      msg = _("undefined symbol %s used as an immediate value");
21480 	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21481 	      msg = _("symbol %s is in a different section");
21482 	    else if (S_IS_WEAK (fixP->fx_addsy))
21483 	      msg = _("symbol %s is weak and may be overridden later");
21484 
21485 	    if (msg)
21486 	      {
21487 		as_bad_where (fixP->fx_file, fixP->fx_line,
21488 			      msg, S_GET_NAME (fixP->fx_addsy));
21489 		break;
21490 	      }
21491 	  }
21492 
21493 	newimm = encode_arm_immediate (value);
21494 	temp = md_chars_to_number (buf, INSN_SIZE);
21495 
21496 	/* If the instruction will fail, see if we can fix things up by
21497 	   changing the opcode.	 */
21498 	if (newimm == (unsigned int) FAIL
21499 	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21500 	  {
21501 	    /* No ?  OK - try using two ADD instructions to generate
21502 	       the value.  */
21503 	    newimm = validate_immediate_twopart (value, & highpart);
21504 
21505 	    /* Yes - then make sure that the second instruction is
21506 	       also an add.  */
21507 	    if (newimm != (unsigned int) FAIL)
21508 	      newinsn = temp;
21509 	    /* Still No ?  Try using a negated value.  */
21510 	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21511 	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21512 	    /* Otherwise - give up.  */
21513 	    else
21514 	      {
21515 		as_bad_where (fixP->fx_file, fixP->fx_line,
21516 			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21517 			      (long) value);
21518 		break;
21519 	      }
21520 
21521 	    /* Replace the first operand in the 2nd instruction (which
21522 	       is the PC) with the destination register.  We have
21523 	       already added in the PC in the first instruction and we
21524 	       do not want to do it again.  */
21525 	    newinsn &= ~ 0xf0000;
21526 	    newinsn |= ((newinsn & 0x0f000) << 4);
21527 	  }
21528 
21529 	newimm |= (temp & 0xfffff000);
21530 	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21531 
21532 	highpart |= (newinsn & 0xfffff000);
21533 	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
21534       }
21535       break;
21536 
21537     case BFD_RELOC_ARM_OFFSET_IMM:
21538       if (!fixP->fx_done && seg->use_rela_p)
21539 	value = 0;
21540 
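      /* Fall through.  */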
21541     case BFD_RELOC_ARM_LITERAL:
21542       sign = value > 0;
21543 
21544       if (value < 0)
21545 	value = - value;
21546 
21547       if (validate_offset_imm (value, 0) == FAIL)
21548 	{
21549 	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
21550 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21551 			  _("invalid literal constant: pool needs to be closer"));
21552 	  else
21553 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21554 			  _("bad immediate value for offset (%ld)"),
21555 			  (long) value);
21556 	  break;
21557 	}
21558 
21559       newval = md_chars_to_number (buf, INSN_SIZE);
21560       if (value == 0)
21561 	newval &= 0xfffff000;
21562       else
21563 	{
21564 	  newval &= 0xff7ff000;
21565 	  newval |= value | (sign ? INDEX_UP : 0);
21566 	}
21567       md_number_to_chars (buf, newval, INSN_SIZE);
21568       break;
21569 
21570     case BFD_RELOC_ARM_OFFSET_IMM8:
21571     case BFD_RELOC_ARM_HWLITERAL:
21572       sign = value > 0;
21573 
21574       if (value < 0)
21575 	value = - value;
21576 
21577       if (validate_offset_imm (value, 1) == FAIL)
21578 	{
21579 	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
21580 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21581 			  _("invalid literal constant: pool needs to be closer"));
21582 	  else
21583 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21584 			  _("bad immediate value for 8-bit offset (%ld)"),
21585 			  (long) value);
21586 	  break;
21587 	}
21588 
21589       newval = md_chars_to_number (buf, INSN_SIZE);
21590       if (value == 0)
21591 	newval &= 0xfffff0f0;
21592       else
21593 	{
21594 	  newval &= 0xff7ff0f0;
21595 	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
21596 	}
21597       md_number_to_chars (buf, newval, INSN_SIZE);
21598       break;
21599 
21600     case BFD_RELOC_ARM_T32_OFFSET_U8:
21601       if (value < 0 || value > 1020 || value % 4 != 0)
21602 	as_bad_where (fixP->fx_file, fixP->fx_line,
21603 		      _("bad immediate value for offset (%ld)"), (long) value);
21604       value /= 4;
21605 
21606       newval = md_chars_to_number (buf+2, THUMB_SIZE);
21607       newval |= value;
21608       md_number_to_chars (buf+2, newval, THUMB_SIZE);
21609       break;
21610 
21611     case BFD_RELOC_ARM_T32_OFFSET_IMM:
21612       /* This is a complicated relocation used for all varieties of Thumb32
21613 	 load/store instruction with immediate offset:
21614 
21615 	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
21616 	                                           *4, optional writeback(W)
21617 						   (doubleword load/store)
21618 
21619 	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
21620 	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
21621 	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
21622 	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
21623 	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
21624 
21625 	 Uppercase letters indicate bits that are already encoded at
21626 	 this point.  Lowercase letters are our problem.  For the
21627 	 second block of instructions, the secondary opcode nybble
21628 	 (bits 8..11) is present, and bit 23 is zero, even if this is
21629 	 a PC-relative operation.  */
21630       newval = md_chars_to_number (buf, THUMB_SIZE);
21631       newval <<= 16;
21632       newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
21633 
21634       if ((newval & 0xf0000000) == 0xe0000000)
21635 	{
21636 	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
21637 	  if (value >= 0)
21638 	    newval |= (1 << 23);
21639 	  else
21640 	    value = -value;
21641 	  if (value % 4 != 0)
21642 	    {
21643 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21644 			    _("offset not a multiple of 4"));
21645 	      break;
21646 	    }
21647 	  value /= 4;
21648 	  if (value > 0xff)
21649 	    {
21650 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21651 			    _("offset out of range"));
21652 	      break;
21653 	    }
21654 	  newval &= ~0xff;
21655 	}
21656       else if ((newval & 0x000f0000) == 0x000f0000)
21657 	{
21658 	  /* PC-relative, 12-bit offset.  */
21659 	  if (value >= 0)
21660 	    newval |= (1 << 23);
21661 	  else
21662 	    value = -value;
21663 	  if (value > 0xfff)
21664 	    {
21665 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21666 			    _("offset out of range"));
21667 	      break;
21668 	    }
21669 	  newval &= ~0xfff;
21670 	}
21671       else if ((newval & 0x00000100) == 0x00000100)
21672 	{
21673 	  /* Writeback: 8-bit, +/- offset.  */
21674 	  if (value >= 0)
21675 	    newval |= (1 << 9);
21676 	  else
21677 	    value = -value;
21678 	  if (value > 0xff)
21679 	    {
21680 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21681 			    _("offset out of range"));
21682 	      break;
21683 	    }
21684 	  newval &= ~0xff;
21685 	}
21686       else if ((newval & 0x00000f00) == 0x00000e00)
21687 	{
21688 	  /* T-instruction: positive 8-bit offset.  */
21689 	  if (value < 0 || value > 0xff)
21690 	    {
21691 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21692 			    _("offset out of range"));
21693 	      break;
21694 	    }
21695 	  newval &= ~0xff;
21696 	  newval |= value;
21697 	}
21698       else
21699 	{
21700 	  /* Positive 12-bit or negative 8-bit offset.  */
21701 	  int limit;
21702 	  if (value >= 0)
21703 	    {
21704 	      newval |= (1 << 23);
21705 	      limit = 0xfff;
21706 	    }
21707 	  else
21708 	    {
21709 	      value = -value;
21710 	      limit = 0xff;
21711 	    }
21712 	  if (value > limit)
21713 	    {
21714 	      as_bad_where (fixP->fx_file, fixP->fx_line,
21715 			    _("offset out of range"));
21716 	      break;
21717 	    }
21718 	  newval &= ~limit;
21719 	}
21720 
21721       newval |= value;
21722       md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
21723       md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
21724       break;
21725 
21726     case BFD_RELOC_ARM_SHIFT_IMM:
21727       newval = md_chars_to_number (buf, INSN_SIZE);
21728       if (((unsigned long) value) > 32
21729 	  || (value == 32
21730 	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
21731 	{
21732 	  as_bad_where (fixP->fx_file, fixP->fx_line,
21733 			_("shift expression is too large"));
21734 	  break;
21735 	}
21736 
21737       if (value == 0)
21738 	/* Shifts of zero must be done as lsl.	*/
21739 	newval &= ~0x60;
21740       else if (value == 32)
21741 	value = 0;
21742       newval &= 0xfffff07f;
21743       newval |= (value & 0x1f) << 7;
21744       md_number_to_chars (buf, newval, INSN_SIZE);
21745       break;
21746 
21747     case BFD_RELOC_ARM_T32_IMMEDIATE:
21748     case BFD_RELOC_ARM_T32_ADD_IMM:
21749     case BFD_RELOC_ARM_T32_IMM12:
21750     case BFD_RELOC_ARM_T32_ADD_PC12:
21751       /* We claim that this fixup has been processed here,
21752 	 even if in fact we generate an error because we do
21753 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
21754       fixP->fx_done = 1;
21755 
21756       if (fixP->fx_addsy
21757 	  && ! S_IS_DEFINED (fixP->fx_addsy))
21758 	{
21759 	  as_bad_where (fixP->fx_file, fixP->fx_line,
21760 			_("undefined symbol %s used as an immediate value"),
21761 			S_GET_NAME (fixP->fx_addsy));
21762 	  break;
21763 	}
21764 
21765       newval = md_chars_to_number (buf, THUMB_SIZE);
21766       newval <<= 16;
21767       newval |= md_chars_to_number (buf+2, THUMB_SIZE);
21768 
21769       newimm = FAIL;
21770       if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21771 	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21772 	{
21773 	  newimm = encode_thumb32_immediate (value);
21774 	  if (newimm == (unsigned int) FAIL)
21775 	    newimm = thumb32_negate_data_op (&newval, value);
21776 	}
21777       if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
21778 	  && newimm == (unsigned int) FAIL)
21779 	{
21780 	  /* Turn add/sub into addw/subw.  */
21781 	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21782 	    newval = (newval & 0xfeffffff) | 0x02000000;
21783 	  /* No flat 12-bit imm encoding for addsw/subsw.  */
21784 	  if ((newval & 0x00100000) == 0)
21785 	    {
21786 	      /* 12 bit immediate for addw/subw.  */
21787 	      if (value < 0)
21788 		{
21789 		  value = -value;
21790 		  newval ^= 0x00a00000;
21791 		}
21792 	      if (value > 0xfff)
21793 		newimm = (unsigned int) FAIL;
21794 	      else
21795 		newimm = value;
21796 	    }
21797 	}
21798 
21799       if (newimm == (unsigned int)FAIL)
21800 	{
21801 	  as_bad_where (fixP->fx_file, fixP->fx_line,
21802 			_("invalid constant (%lx) after fixup"),
21803 			(unsigned long) value);
21804 	  break;
21805 	}
21806 
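      /* Scatter the 12-bit encoded immediate into the i:imm3:imm8 fields
	 (bit 26, bits 14..12 and bits 7..0 of the combined 32-bit
	 value).  */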
21807       newval |= (newimm & 0x800) << 15;
21808       newval |= (newimm & 0x700) << 4;
21809       newval |= (newimm & 0x0ff);
21810 
21811       md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
21812       md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
21813       break;
21814 
21815     case BFD_RELOC_ARM_SMC:
21816       if (((unsigned long) value) > 0xffff)
21817 	as_bad_where (fixP->fx_file, fixP->fx_line,
21818 		      _("invalid smc expression"));
21819       newval = md_chars_to_number (buf, INSN_SIZE);
21820       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21821       md_number_to_chars (buf, newval, INSN_SIZE);
21822       break;
21823 
21824     case BFD_RELOC_ARM_HVC:
21825       if (((unsigned long) value) > 0xffff)
21826 	as_bad_where (fixP->fx_file, fixP->fx_line,
21827 		      _("invalid hvc expression"));
21828       newval = md_chars_to_number (buf, INSN_SIZE);
21829       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21830       md_number_to_chars (buf, newval, INSN_SIZE);
21831       break;
21832 
21833     case BFD_RELOC_ARM_SWI:
21834       if (fixP->tc_fix_data != 0)
21835 	{
21836 	  if (((unsigned long) value) > 0xff)
21837 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21838 			  _("invalid swi expression"));
21839 	  newval = md_chars_to_number (buf, THUMB_SIZE);
21840 	  newval |= value;
21841 	  md_number_to_chars (buf, newval, THUMB_SIZE);
21842 	}
21843       else
21844 	{
21845 	  if (((unsigned long) value) > 0x00ffffff)
21846 	    as_bad_where (fixP->fx_file, fixP->fx_line,
21847 			  _("invalid swi expression"));
21848 	  newval = md_chars_to_number (buf, INSN_SIZE);
21849 	  newval |= value;
21850 	  md_number_to_chars (buf, newval, INSN_SIZE);
21851 	}
21852       break;
21853 
21854     case BFD_RELOC_ARM_MULTI:
21855       if (((unsigned long) value) > 0xffff)
21856 	as_bad_where (fixP->fx_file, fixP->fx_line,
21857 		      _("invalid expression in load/store multiple"));
21858       newval = value | md_chars_to_number (buf, INSN_SIZE);
21859       md_number_to_chars (buf, newval, INSN_SIZE);
21860       break;
21861 
21862 #ifdef OBJ_ELF
21863     case BFD_RELOC_ARM_PCREL_CALL:
21864 
21865       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21866 	  && fixP->fx_addsy
21867 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21868 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21869 	  && THUMB_IS_FUNC (fixP->fx_addsy))
21870 	/* Flip the bl to blx.  This is a simple bit flip
21871 	   here because we generate PCREL_CALL for
21872 	   unconditional bls.  */
21873 	{
21874 	  newval = md_chars_to_number (buf, INSN_SIZE);
21875 	  newval = newval | 0x10000000;
21876 	  md_number_to_chars (buf, newval, INSN_SIZE);
21877 	  temp = 1;
21878 	  fixP->fx_done = 1;
21879 	}
21880       else
21881 	temp = 3;
21882       goto arm_branch_common;
21883 
21884     case BFD_RELOC_ARM_PCREL_JUMP:
21885       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21886 	  && fixP->fx_addsy
21887 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21888 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21889 	  && THUMB_IS_FUNC (fixP->fx_addsy))
21890 	{
21891 	  /* This would map to a bl<cond>, b<cond> or
21892 	     b<always> to a Thumb function.  We
21893 	     need to force a relocation for this particular
21894 	     case.  */
21895 	  newval = md_chars_to_number (buf, INSN_SIZE);
21896 	  fixP->fx_done = 0;
21897 	}
21898 
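      /* Fall through.  */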
21899     case BFD_RELOC_ARM_PLT32:
21900 #endif
21901     case BFD_RELOC_ARM_PCREL_BRANCH:
21902       temp = 3;
21903       goto arm_branch_common;
21904 
21905     case BFD_RELOC_ARM_PCREL_BLX:
21906 
21907       temp = 1;
21908       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21909 	  && fixP->fx_addsy
21910 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21911 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21912 	  && ARM_IS_FUNC (fixP->fx_addsy))
21913 	{
21914 	  /* Flip the blx to a bl and warn.  */
21915 	  const char *name = S_GET_NAME (fixP->fx_addsy);
21916 	  newval = 0xeb000000;
21917 	  as_warn_where (fixP->fx_file, fixP->fx_line,
21918 			 _("blx to '%s' an ARM ISA state function changed to bl"),
21919 			  name);
21920 	  md_number_to_chars (buf, newval, INSN_SIZE);
21921 	  temp = 3;
21922 	  fixP->fx_done = 1;
21923 	}
21924 
21925 #ifdef OBJ_ELF
21926        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21927          fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21928 #endif
21929 
21930     arm_branch_common:
21931       /* We are going to store value (shifted right by two) in the
21932 	 instruction, in a 24-bit signed field.  Bits 25 through 31 must be
21933 	 either all clear or all set, and bit 0 must be clear.  For B/BL
21934 	 bit 1 must also be clear.  */
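      /* For example (illustrative), a BL displacement of 0x1000 stores
	 0x000400 in the 24-bit field; for BLX, bit 1 of the displacement
	 becomes the H bit (bit 24).  */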
21935       if (value & temp)
21936 	as_bad_where (fixP->fx_file, fixP->fx_line,
21937 		      _("misaligned branch destination"));
21938       if ((value & (offsetT)0xfe000000) != (offsetT)0
21939 	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21940 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21941 
21942       if (fixP->fx_done || !seg->use_rela_p)
21943 	{
21944 	  newval = md_chars_to_number (buf, INSN_SIZE);
21945 	  newval |= (value >> 2) & 0x00ffffff;
21946 	  /* Set the H bit on BLX instructions.  */
21947 	  if (temp == 1)
21948 	    {
21949 	      if (value & 2)
21950 		newval |= 0x01000000;
21951 	      else
21952 		newval &= ~0x01000000;
21953 	    }
21954 	  md_number_to_chars (buf, newval, INSN_SIZE);
21955 	}
21956       break;
21957 
21958     case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21959       /* CBZ can only branch forward.  */
21960 
21961       /* Attempts to use CBZ to branch to the next instruction
21962          (which is, strictly speaking, prohibited) will be turned into
21963          no-ops.
21964 
21965 	 FIXME: It may be better to remove the instruction completely and
21966 	 perform relaxation.  */
21967       if (value == -2)
21968 	{
21969 	  newval = md_chars_to_number (buf, THUMB_SIZE);
21970 	  newval = 0xbf00; /* NOP encoding T1 */
21971 	  md_number_to_chars (buf, newval, THUMB_SIZE);
21972 	}
21973       else
21974 	{
21975 	  if (value & ~0x7e)
21976 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21977 
21978           if (fixP->fx_done || !seg->use_rela_p)
21979 	    {
21980 	      newval = md_chars_to_number (buf, THUMB_SIZE);
21981 	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21982 	      md_number_to_chars (buf, newval, THUMB_SIZE);
21983 	    }
21984 	}
21985       break;
21986 
21987     case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
21988       if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21989 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21990 
21991       if (fixP->fx_done || !seg->use_rela_p)
21992 	{
21993 	  newval = md_chars_to_number (buf, THUMB_SIZE);
21994 	  newval |= (value & 0x1ff) >> 1;
21995 	  md_number_to_chars (buf, newval, THUMB_SIZE);
21996 	}
21997       break;
21998 
21999     case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
22000       if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22001 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22002 
22003       if (fixP->fx_done || !seg->use_rela_p)
22004 	{
22005 	  newval = md_chars_to_number (buf, THUMB_SIZE);
22006 	  newval |= (value & 0xfff) >> 1;
22007 	  md_number_to_chars (buf, newval, THUMB_SIZE);
22008 	}
22009       break;
22010 
22011     case BFD_RELOC_THUMB_PCREL_BRANCH20:
22012       if (fixP->fx_addsy
22013 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22014 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22015 	  && ARM_IS_FUNC (fixP->fx_addsy)
22016 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22017 	{
22018 	  /* Force a relocation for a branch 20 bits wide.  */
22019 	  fixP->fx_done = 0;
22020 	}
22021       if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22022 	as_bad_where (fixP->fx_file, fixP->fx_line,
22023 		      _("conditional branch out of range"));
22024 
22025       if (fixP->fx_done || !seg->use_rela_p)
22026 	{
22027 	  offsetT newval2;
22028 	  addressT S, J1, J2, lo, hi;
22029 
22030 	  S  = (value & 0x00100000) >> 20;
22031 	  J2 = (value & 0x00080000) >> 19;
22032 	  J1 = (value & 0x00040000) >> 18;
22033 	  hi = (value & 0x0003f000) >> 12;
22034 	  lo = (value & 0x00000ffe) >> 1;
22035 
22036 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
22037 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22038 	  newval  |= (S << 10) | hi;
22039 	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
22040 	  md_number_to_chars (buf, newval, THUMB_SIZE);
22041 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22042 	}
22043       break;
22044 
22045     case BFD_RELOC_THUMB_PCREL_BLX:
22046       /* If there is a blx from a Thumb state function to
22047 	 another Thumb function, flip this to a bl and warn
22048 	 about it.  */
22049 
22050       if (fixP->fx_addsy
22051 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22052 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22053 	  && THUMB_IS_FUNC (fixP->fx_addsy))
22054 	{
22055 	  const char *name = S_GET_NAME (fixP->fx_addsy);
22056 	  as_warn_where (fixP->fx_file, fixP->fx_line,
22057 			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22058 			 name);
22059 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22060 	  newval = newval | 0x1000;
22061 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22062 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22063 	  fixP->fx_done = 1;
22064 	}
22065 
22066 
22067       goto thumb_bl_common;
22068 
22069     case BFD_RELOC_THUMB_PCREL_BRANCH23:
22070       /* A bl from Thumb state ISA to an internal ARM state function
22071 	 is converted to a blx.  */
22072       if (fixP->fx_addsy
22073 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22074 	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22075 	  && ARM_IS_FUNC (fixP->fx_addsy)
22076 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22077 	{
22078 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22079 	  newval = newval & ~0x1000;
22080 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22081 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22082 	  fixP->fx_done = 1;
22083 	}
22084 
22085     thumb_bl_common:
22086 
22087 #ifdef OBJ_ELF
22088        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22089 	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22090 	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22091 #endif
22092 
22093       if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22094 	/* For a BLX instruction, make sure that the relocation is rounded up
22095 	   to a word boundary.  This follows the semantics of the instruction
22096 	   which specifies that bit 1 of the target address will come from bit
22097 	   1 of the base address.  */
22098 	value = (value + 1) & ~ 1;
22099 
22100       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22101 	{
22102 	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22103 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22104 	  else if ((value & ~0x1ffffff)
22105 		   && ((value & ~0x1ffffff) != ~0x1ffffff))
22106 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22107 			  _("Thumb2 branch out of range"));
22108 	}
22109 
22110       if (fixP->fx_done || !seg->use_rela_p)
22111 	encode_thumb2_b_bl_offset (buf, value);
22112 
22113       break;
22114 
22115     case BFD_RELOC_THUMB_PCREL_BRANCH25:
22116       if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22117 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22118 
22119       if (fixP->fx_done || !seg->use_rela_p)
22120 	  encode_thumb2_b_bl_offset (buf, value);
22121 
22122       break;
22123 
22124     case BFD_RELOC_8:
22125       if (fixP->fx_done || !seg->use_rela_p)
22126 	md_number_to_chars (buf, value, 1);
22127       break;
22128 
22129     case BFD_RELOC_16:
22130       if (fixP->fx_done || !seg->use_rela_p)
22131 	md_number_to_chars (buf, value, 2);
22132       break;
22133 
22134 #ifdef OBJ_ELF
22135     case BFD_RELOC_ARM_TLS_CALL:
22136     case BFD_RELOC_ARM_THM_TLS_CALL:
22137     case BFD_RELOC_ARM_TLS_DESCSEQ:
22138     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22139       S_SET_THREAD_LOCAL (fixP->fx_addsy);
22140       break;
22141 
22142     case BFD_RELOC_ARM_TLS_GOTDESC:
22143     case BFD_RELOC_ARM_TLS_GD32:
22144     case BFD_RELOC_ARM_TLS_LE32:
22145     case BFD_RELOC_ARM_TLS_IE32:
22146     case BFD_RELOC_ARM_TLS_LDM32:
22147     case BFD_RELOC_ARM_TLS_LDO32:
22148       S_SET_THREAD_LOCAL (fixP->fx_addsy);
22149       /* fall through */
22150 
22151     case BFD_RELOC_ARM_GOT32:
22152     case BFD_RELOC_ARM_GOTOFF:
22153       if (fixP->fx_done || !seg->use_rela_p)
22154 	md_number_to_chars (buf, 0, 4);
22155       break;
22156 
22157     case BFD_RELOC_ARM_GOT_PREL:
22158       if (fixP->fx_done || !seg->use_rela_p)
22159         md_number_to_chars (buf, value, 4);
22160       break;
22161 
22162     case BFD_RELOC_ARM_TARGET2:
22163       /* TARGET2 is not partial-inplace, so we need to write the
22164          addend here for REL targets, because it won't be written out
22165          during reloc processing later.  */
22166       if (fixP->fx_done || !seg->use_rela_p)
22167 	md_number_to_chars (buf, fixP->fx_offset, 4);
22168       break;
22169 #endif
22170 
22171     case BFD_RELOC_RVA:
22172     case BFD_RELOC_32:
22173     case BFD_RELOC_ARM_TARGET1:
22174     case BFD_RELOC_ARM_ROSEGREL32:
22175     case BFD_RELOC_ARM_SBREL32:
22176     case BFD_RELOC_32_PCREL:
22177 #ifdef TE_PE
22178     case BFD_RELOC_32_SECREL:
22179 #endif
22180       if (fixP->fx_done || !seg->use_rela_p)
22181 #ifdef TE_WINCE
22182 	/* For WinCE we only do this for pcrel fixups.  */
22183 	if (fixP->fx_done || fixP->fx_pcrel)
22184 #endif
22185 	  md_number_to_chars (buf, value, 4);
22186       break;
22187 
22188 #ifdef OBJ_ELF
22189     case BFD_RELOC_ARM_PREL31:
22190       if (fixP->fx_done || !seg->use_rela_p)
22191 	{
22192 	  newval = md_chars_to_number (buf, 4) & 0x80000000;
22193 	  if ((value ^ (value >> 1)) & 0x40000000)
22194 	    {
22195 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22196 			    _("rel31 relocation overflow"));
22197 	    }
22198 	  newval |= value & 0x7fffffff;
22199 	  md_number_to_chars (buf, newval, 4);
22200 	}
22201       break;
22202 #endif
22203 
22204     case BFD_RELOC_ARM_CP_OFF_IMM:
22205     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22206       if (value < -1023 || value > 1023 || (value & 3))
22207 	as_bad_where (fixP->fx_file, fixP->fx_line,
22208 		      _("co-processor offset out of range"));
22209     cp_off_common:
22210       sign = value > 0;
22211       if (value < 0)
22212 	value = -value;
22213       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22214 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22215 	newval = md_chars_to_number (buf, INSN_SIZE);
22216       else
22217 	newval = get_thumb32_insn (buf);
22218       if (value == 0)
22219 	newval &= 0xffffff00;
22220       else
22221 	{
22222 	  newval &= 0xff7fff00;
22223 	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22224 	}
22225       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22226 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22227 	md_number_to_chars (buf, newval, INSN_SIZE);
22228       else
22229 	put_thumb32_insn (buf, newval);
22230       break;
22231 
22232     case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22233     case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22234       if (value < -255 || value > 255)
22235 	as_bad_where (fixP->fx_file, fixP->fx_line,
22236 		      _("co-processor offset out of range"));
22237       value *= 4;
22238       goto cp_off_common;
22239 
22240     case BFD_RELOC_ARM_THUMB_OFFSET:
22241       newval = md_chars_to_number (buf, THUMB_SIZE);
22242       /* Exactly which ranges apply, and where the offset is inserted,
22243 	 depend on the type of instruction; we can establish this from
22244 	 the top 4 bits.  */
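      /* For instance (illustrative), a word load/store scales its offset
	 by 4 into bits 6..10, so an offset of 0x7c encodes as imm5 = 0x1f;
	 the PC and SP relative forms use an 8-bit word-scaled offset
	 instead.  */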
22245       switch (newval >> 12)
22246 	{
22247 	case 4: /* PC load.  */
22248 	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22249 	     forced to zero for these loads; md_pcrel_from has already
22250 	     compensated for this.  */
22251 	  if (value & 3)
22252 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22253 			  _("invalid offset, target not word aligned (0x%08lX)"),
22254 			  (((unsigned long) fixP->fx_frag->fr_address
22255 			    + (unsigned long) fixP->fx_where) & ~3)
22256 			  + (unsigned long) value);
22257 
22258 	  if (value & ~0x3fc)
22259 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22260 			  _("invalid offset, value too big (0x%08lX)"),
22261 			  (long) value);
22262 
22263 	  newval |= value >> 2;
22264 	  break;
22265 
22266 	case 9: /* SP load/store.  */
22267 	  if (value & ~0x3fc)
22268 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22269 			  _("invalid offset, value too big (0x%08lX)"),
22270 			  (long) value);
22271 	  newval |= value >> 2;
22272 	  break;
22273 
22274 	case 6: /* Word load/store.  */
22275 	  if (value & ~0x7c)
22276 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22277 			  _("invalid offset, value too big (0x%08lX)"),
22278 			  (long) value);
22279 	  newval |= value << 4; /* 6 - 2.  */
22280 	  break;
22281 
22282 	case 7: /* Byte load/store.  */
22283 	  if (value & ~0x1f)
22284 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22285 			  _("invalid offset, value too big (0x%08lX)"),
22286 			  (long) value);
22287 	  newval |= value << 6;
22288 	  break;
22289 
22290 	case 8: /* Halfword load/store.	 */
22291 	  if (value & ~0x3e)
22292 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22293 			  _("invalid offset, value too big (0x%08lX)"),
22294 			  (long) value);
22295 	  newval |= value << 5; /* 6 - 1.  */
22296 	  break;
22297 
22298 	default:
22299 	  as_bad_where (fixP->fx_file, fixP->fx_line,
22300 			"Unable to process relocation for thumb opcode: %lx",
22301 			(unsigned long) newval);
22302 	  break;
22303 	}
22304       md_number_to_chars (buf, newval, THUMB_SIZE);
22305       break;
22306 
22307     case BFD_RELOC_ARM_THUMB_ADD:
22308       /* This is a complicated relocation, since we use it for all of
22309 	 the following immediate relocations:
22310 
22311 	    3bit ADD/SUB
22312 	    8bit ADD/SUB
22313 	    9bit ADD/SUB SP word-aligned
22314 	   10bit ADD PC/SP word-aligned
22315 
22316 	 The type of instruction being processed is encoded in the
22317 	 instruction field:
22318 
22319 	   0x8000  SUB
22320 	   0x00F0  Rd
22321 	   0x000F  Rs
22322       */
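      /* For example (illustrative), 'add r2, sp, #1020' uses the 10-bit
	 PC/SP form (value >> 2 == 0xff), while 'sub sp, sp, #8' is the
	 9-bit SP adjustment with value >> 2 == 2; a plain
	 'add r0, r1, #5' only fits the 3-bit form.  */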
22323       newval = md_chars_to_number (buf, THUMB_SIZE);
22324       {
22325 	int rd = (newval >> 4) & 0xf;
22326 	int rs = newval & 0xf;
22327 	int subtract = !!(newval & 0x8000);
22328 
22329 	/* Check for HI regs, only very restricted cases allowed:
22330 	   Adjusting SP, and using PC or SP to get an address.	*/
22331 	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22332 	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
22333 	  as_bad_where (fixP->fx_file, fixP->fx_line,
22334 			_("invalid Hi register with immediate"));
22335 
22336 	/* If value is negative, choose the opposite instruction.  */
22337 	if (value < 0)
22338 	  {
22339 	    value = -value;
22340 	    subtract = !subtract;
22341 	    if (value < 0)
22342 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22343 			    _("immediate value out of range"));
22344 	  }
22345 
22346 	if (rd == REG_SP)
22347 	  {
22348 	    if (value & ~0x1fc)
22349 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22350 			    _("invalid immediate for stack address calculation"));
22351 	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22352 	    newval |= value >> 2;
22353 	  }
22354 	else if (rs == REG_PC || rs == REG_SP)
22355 	  {
22356 	    if (subtract || value & ~0x3fc)
22357 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22358 			    _("invalid immediate for address calculation (value = 0x%08lX)"),
22359 			    (unsigned long) value);
22360 	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22361 	    newval |= rd << 8;
22362 	    newval |= value >> 2;
22363 	  }
22364 	else if (rs == rd)
22365 	  {
22366 	    if (value & ~0xff)
22367 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22368 			    _("immediate value out of range"));
22369 	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22370 	    newval |= (rd << 8) | value;
22371 	  }
22372 	else
22373 	  {
22374 	    if (value & ~0x7)
22375 	      as_bad_where (fixP->fx_file, fixP->fx_line,
22376 			    _("immediate value out of range"));
22377 	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22378 	    newval |= rd | (rs << 3) | (value << 6);
22379 	  }
22380       }
22381       md_number_to_chars (buf, newval, THUMB_SIZE);
22382       break;
22383 
22384     case BFD_RELOC_ARM_THUMB_IMM:
22385       newval = md_chars_to_number (buf, THUMB_SIZE);
22386       if (value < 0 || value > 255)
22387 	as_bad_where (fixP->fx_file, fixP->fx_line,
22388 		      _("invalid immediate: %ld is out of range"),
22389 		      (long) value);
22390       newval |= value;
22391       md_number_to_chars (buf, newval, THUMB_SIZE);
22392       break;
22393 
22394     case BFD_RELOC_ARM_THUMB_SHIFT:
22395       /* 5bit shift value (0..32).  LSL cannot take 32.	 */
22396       newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22397       temp = newval & 0xf800;
22398       if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22399 	as_bad_where (fixP->fx_file, fixP->fx_line,
22400 		      _("invalid shift value: %ld"), (long) value);
22401       /* Shifts of zero must be encoded as LSL.	 */
22402       if (value == 0)
22403 	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22404       /* Shifts of 32 are encoded as zero.  */
22405       else if (value == 32)
22406 	value = 0;
22407       newval |= value << 6;
22408       md_number_to_chars (buf, newval, THUMB_SIZE);
22409       break;
22410 
22411     case BFD_RELOC_VTABLE_INHERIT:
22412     case BFD_RELOC_VTABLE_ENTRY:
22413       fixP->fx_done = 0;
22414       return;
22415 
22416     case BFD_RELOC_ARM_MOVW:
22417     case BFD_RELOC_ARM_MOVT:
22418     case BFD_RELOC_ARM_THUMB_MOVW:
22419     case BFD_RELOC_ARM_THUMB_MOVT:
22420       if (fixP->fx_done || !seg->use_rela_p)
22421 	{
22422 	  /* REL format relocations are limited to a 16-bit addend.  */
22423 	  if (!fixP->fx_done)
22424 	    {
22425 	      if (value < -0x8000 || value > 0x7fff)
22426 		  as_bad_where (fixP->fx_file, fixP->fx_line,
22427 				_("offset out of range"));
22428 	    }
22429 	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22430 		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22431 	    {
22432 	      value >>= 16;
22433 	    }
22434 
22435 	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22436 	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22437 	    {
22438 	      newval = get_thumb32_insn (buf);
22439 	      newval &= 0xfbf08f00;
22440 	      newval |= (value & 0xf000) << 4;
22441 	      newval |= (value & 0x0800) << 15;
22442 	      newval |= (value & 0x0700) << 4;
22443 	      newval |= (value & 0x00ff);
22444 	      put_thumb32_insn (buf, newval);
22445 	    }
22446 	  else
22447 	    {
22448 	      newval = md_chars_to_number (buf, 4);
22449 	      newval &= 0xfff0f000;
22450 	      newval |= value & 0x0fff;
22451 	      newval |= (value & 0xf000) << 4;
22452 	      md_number_to_chars (buf, newval, 4);
22453 	    }
22454 	}
22455       return;
22456 
22457    case BFD_RELOC_ARM_ALU_PC_G0_NC:
22458    case BFD_RELOC_ARM_ALU_PC_G0:
22459    case BFD_RELOC_ARM_ALU_PC_G1_NC:
22460    case BFD_RELOC_ARM_ALU_PC_G1:
22461    case BFD_RELOC_ARM_ALU_PC_G2:
22462    case BFD_RELOC_ARM_ALU_SB_G0_NC:
22463    case BFD_RELOC_ARM_ALU_SB_G0:
22464    case BFD_RELOC_ARM_ALU_SB_G1_NC:
22465    case BFD_RELOC_ARM_ALU_SB_G1:
22466    case BFD_RELOC_ARM_ALU_SB_G2:
22467      gas_assert (!fixP->fx_done);
22468      if (!seg->use_rela_p)
22469        {
22470          bfd_vma insn;
22471          bfd_vma encoded_addend;
22472          bfd_vma addend_abs = abs (value);
22473 
22474          /* Check that the absolute value of the addend can be
22475             expressed as an 8-bit constant plus a rotation.  */
22476          encoded_addend = encode_arm_immediate (addend_abs);
22477          if (encoded_addend == (unsigned int) FAIL)
22478 	   as_bad_where (fixP->fx_file, fixP->fx_line,
22479 	                 _("the offset 0x%08lX is not representable"),
22480                          (unsigned long) addend_abs);
22481 
22482          /* Extract the instruction.  */
22483          insn = md_chars_to_number (buf, INSN_SIZE);
22484 
22485          /* If the addend is positive, use an ADD instruction.
22486             Otherwise use a SUB.  Take care not to destroy the S bit.  */
22487          insn &= 0xff1fffff;
22488          if (value < 0)
22489            insn |= 1 << 22;
22490          else
22491            insn |= 1 << 23;
22492 
22493          /* Place the encoded addend into the first 12 bits of the
22494             instruction.  */
22495          insn &= 0xfffff000;
22496          insn |= encoded_addend;
22497 
22498          /* Update the instruction.  */
22499          md_number_to_chars (buf, insn, INSN_SIZE);
22500        }
22501      break;
22502 
22503     case BFD_RELOC_ARM_LDR_PC_G0:
22504     case BFD_RELOC_ARM_LDR_PC_G1:
22505     case BFD_RELOC_ARM_LDR_PC_G2:
22506     case BFD_RELOC_ARM_LDR_SB_G0:
22507     case BFD_RELOC_ARM_LDR_SB_G1:
22508     case BFD_RELOC_ARM_LDR_SB_G2:
22509       gas_assert (!fixP->fx_done);
22510       if (!seg->use_rela_p)
22511         {
22512           bfd_vma insn;
22513           bfd_vma addend_abs = abs (value);
22514 
22515           /* Check that the absolute value of the addend can be
22516              encoded in 12 bits.  */
22517           if (addend_abs >= 0x1000)
22518 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22519 	  	          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22520                           (unsigned long) addend_abs);
22521 
22522           /* Extract the instruction.  */
22523           insn = md_chars_to_number (buf, INSN_SIZE);
22524 
22525           /* If the addend is negative, clear bit 23 of the instruction.
22526              Otherwise set it.  */
22527           if (value < 0)
22528             insn &= ~(1 << 23);
22529           else
22530             insn |= 1 << 23;
22531 
22532           /* Place the absolute value of the addend into the first 12 bits
22533              of the instruction.  */
22534           insn &= 0xfffff000;
22535           insn |= addend_abs;
22536 
22537           /* Update the instruction.  */
22538           md_number_to_chars (buf, insn, INSN_SIZE);
22539         }
22540       break;
22541 
22542     case BFD_RELOC_ARM_LDRS_PC_G0:
22543     case BFD_RELOC_ARM_LDRS_PC_G1:
22544     case BFD_RELOC_ARM_LDRS_PC_G2:
22545     case BFD_RELOC_ARM_LDRS_SB_G0:
22546     case BFD_RELOC_ARM_LDRS_SB_G1:
22547     case BFD_RELOC_ARM_LDRS_SB_G2:
22548       gas_assert (!fixP->fx_done);
22549       if (!seg->use_rela_p)
22550         {
22551           bfd_vma insn;
22552           bfd_vma addend_abs = abs (value);
22553 
22554           /* Check that the absolute value of the addend can be
22555              encoded in 8 bits.  */
22556           if (addend_abs >= 0x100)
22557 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22558 	  	          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
22559                           (unsigned long) addend_abs);
22560 
22561           /* Extract the instruction.  */
22562           insn = md_chars_to_number (buf, INSN_SIZE);
22563 
22564           /* If the addend is negative, clear bit 23 of the instruction.
22565              Otherwise set it.  */
22566           if (value < 0)
22567             insn &= ~(1 << 23);
22568           else
22569             insn |= 1 << 23;
22570 
22571           /* Place the first four bits of the absolute value of the addend
22572              into the first 4 bits of the instruction, and the remaining
22573              four into bits 8 .. 11.  */
22574           insn &= 0xfffff0f0;
22575           insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
22576 
22577           /* Update the instruction.  */
22578           md_number_to_chars (buf, insn, INSN_SIZE);
22579         }
22580       break;
22581 
22582     case BFD_RELOC_ARM_LDC_PC_G0:
22583     case BFD_RELOC_ARM_LDC_PC_G1:
22584     case BFD_RELOC_ARM_LDC_PC_G2:
22585     case BFD_RELOC_ARM_LDC_SB_G0:
22586     case BFD_RELOC_ARM_LDC_SB_G1:
22587     case BFD_RELOC_ARM_LDC_SB_G2:
22588       gas_assert (!fixP->fx_done);
22589       if (!seg->use_rela_p)
22590         {
22591           bfd_vma insn;
22592           bfd_vma addend_abs = abs (value);
22593 
22594           /* Check that the absolute value of the addend is a multiple of
22595              four and, when divided by four, fits in 8 bits.  */
22596           if (addend_abs & 0x3)
22597 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22598 	  	          _("bad offset 0x%08lX (must be word-aligned)"),
22599                           (unsigned long) addend_abs);
22600 
22601           if ((addend_abs >> 2) > 0xff)
22602 	    as_bad_where (fixP->fx_file, fixP->fx_line,
22603 	  	          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
22604                           (unsigned long) addend_abs);
22605 
22606           /* Extract the instruction.  */
22607           insn = md_chars_to_number (buf, INSN_SIZE);
22608 
22609           /* If the addend is negative, clear bit 23 of the instruction.
22610              Otherwise set it.  */
22611           if (value < 0)
22612             insn &= ~(1 << 23);
22613           else
22614             insn |= 1 << 23;
22615 
22616           /* Place the addend (divided by four) into the first eight
22617              bits of the instruction.  */
22618           insn &= 0xfffffff0;
22619           insn |= addend_abs >> 2;
22620 
22621           /* Update the instruction.  */
22622           md_number_to_chars (buf, insn, INSN_SIZE);
22623         }
22624       break;
22625 
22626     case BFD_RELOC_ARM_V4BX:
22627       /* This will need to go in the object file.  */
22628       fixP->fx_done = 0;
22629       break;
22630 
22631     case BFD_RELOC_UNUSED:
22632     default:
22633       as_bad_where (fixP->fx_file, fixP->fx_line,
22634 		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
22635     }
22636 }
22637 
22638 /* Translate internal representation of relocation info to BFD target
22639    format.  */
22640 
22641 arelent *
22642 tc_gen_reloc (asection *section, fixS *fixp)
22643 {
22644   arelent * reloc;
22645   bfd_reloc_code_real_type code;
22646 
22647   reloc = (arelent *) xmalloc (sizeof (arelent));
22648 
22649   reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
22650   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
22651   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
22652 
22653   if (fixp->fx_pcrel)
22654     {
22655       if (section->use_rela_p)
22656 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
22657       else
22658 	fixp->fx_offset = reloc->address;
22659     }
22660   reloc->addend = fixp->fx_offset;
22661 
22662   switch (fixp->fx_r_type)
22663     {
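      /* Each of the relocations below handles its PC-relative variant and
	 otherwise deliberately falls through, ending at the generic
	 'code = fixp->fx_r_type' assignment further down.  */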
22664     case BFD_RELOC_8:
22665       if (fixp->fx_pcrel)
22666 	{
22667 	  code = BFD_RELOC_8_PCREL;
22668 	  break;
22669 	}
22670 
22671     case BFD_RELOC_16:
22672       if (fixp->fx_pcrel)
22673 	{
22674 	  code = BFD_RELOC_16_PCREL;
22675 	  break;
22676 	}
22677 
22678     case BFD_RELOC_32:
22679       if (fixp->fx_pcrel)
22680 	{
22681 	  code = BFD_RELOC_32_PCREL;
22682 	  break;
22683 	}
22684 
22685     case BFD_RELOC_ARM_MOVW:
22686       if (fixp->fx_pcrel)
22687 	{
22688 	  code = BFD_RELOC_ARM_MOVW_PCREL;
22689 	  break;
22690 	}
22691 
22692     case BFD_RELOC_ARM_MOVT:
22693       if (fixp->fx_pcrel)
22694 	{
22695 	  code = BFD_RELOC_ARM_MOVT_PCREL;
22696 	  break;
22697 	}
22698 
22699     case BFD_RELOC_ARM_THUMB_MOVW:
22700       if (fixp->fx_pcrel)
22701 	{
22702 	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
22703 	  break;
22704 	}
22705 
22706     case BFD_RELOC_ARM_THUMB_MOVT:
22707       if (fixp->fx_pcrel)
22708 	{
22709 	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
22710 	  break;
22711 	}
22712 
22713     case BFD_RELOC_NONE:
22714     case BFD_RELOC_ARM_PCREL_BRANCH:
22715     case BFD_RELOC_ARM_PCREL_BLX:
22716     case BFD_RELOC_RVA:
22717     case BFD_RELOC_THUMB_PCREL_BRANCH7:
22718     case BFD_RELOC_THUMB_PCREL_BRANCH9:
22719     case BFD_RELOC_THUMB_PCREL_BRANCH12:
22720     case BFD_RELOC_THUMB_PCREL_BRANCH20:
22721     case BFD_RELOC_THUMB_PCREL_BRANCH23:
22722     case BFD_RELOC_THUMB_PCREL_BRANCH25:
22723     case BFD_RELOC_VTABLE_ENTRY:
22724     case BFD_RELOC_VTABLE_INHERIT:
22725 #ifdef TE_PE
22726     case BFD_RELOC_32_SECREL:
22727 #endif
22728       code = fixp->fx_r_type;
22729       break;
22730 
22731     case BFD_RELOC_THUMB_PCREL_BLX:
22732 #ifdef OBJ_ELF
22733       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22734 	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
22735       else
22736 #endif
22737 	code = BFD_RELOC_THUMB_PCREL_BLX;
22738       break;
22739 
22740     case BFD_RELOC_ARM_LITERAL:
22741     case BFD_RELOC_ARM_HWLITERAL:
22742       /* If this is called then a literal has
22743 	 been referenced across a section boundary.  */
22744       as_bad_where (fixp->fx_file, fixp->fx_line,
22745 		    _("literal referenced across section boundary"));
22746       return NULL;
22747 
22748 #ifdef OBJ_ELF
22749     case BFD_RELOC_ARM_TLS_CALL:
22750     case BFD_RELOC_ARM_THM_TLS_CALL:
22751     case BFD_RELOC_ARM_TLS_DESCSEQ:
22752     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22753     case BFD_RELOC_ARM_GOT32:
22754     case BFD_RELOC_ARM_GOTOFF:
22755     case BFD_RELOC_ARM_GOT_PREL:
22756     case BFD_RELOC_ARM_PLT32:
22757     case BFD_RELOC_ARM_TARGET1:
22758     case BFD_RELOC_ARM_ROSEGREL32:
22759     case BFD_RELOC_ARM_SBREL32:
22760     case BFD_RELOC_ARM_PREL31:
22761     case BFD_RELOC_ARM_TARGET2:
22762     case BFD_RELOC_ARM_TLS_LE32:
22763     case BFD_RELOC_ARM_TLS_LDO32:
22764     case BFD_RELOC_ARM_PCREL_CALL:
22765     case BFD_RELOC_ARM_PCREL_JUMP:
22766     case BFD_RELOC_ARM_ALU_PC_G0_NC:
22767     case BFD_RELOC_ARM_ALU_PC_G0:
22768     case BFD_RELOC_ARM_ALU_PC_G1_NC:
22769     case BFD_RELOC_ARM_ALU_PC_G1:
22770     case BFD_RELOC_ARM_ALU_PC_G2:
22771     case BFD_RELOC_ARM_LDR_PC_G0:
22772     case BFD_RELOC_ARM_LDR_PC_G1:
22773     case BFD_RELOC_ARM_LDR_PC_G2:
22774     case BFD_RELOC_ARM_LDRS_PC_G0:
22775     case BFD_RELOC_ARM_LDRS_PC_G1:
22776     case BFD_RELOC_ARM_LDRS_PC_G2:
22777     case BFD_RELOC_ARM_LDC_PC_G0:
22778     case BFD_RELOC_ARM_LDC_PC_G1:
22779     case BFD_RELOC_ARM_LDC_PC_G2:
22780     case BFD_RELOC_ARM_ALU_SB_G0_NC:
22781     case BFD_RELOC_ARM_ALU_SB_G0:
22782     case BFD_RELOC_ARM_ALU_SB_G1_NC:
22783     case BFD_RELOC_ARM_ALU_SB_G1:
22784     case BFD_RELOC_ARM_ALU_SB_G2:
22785     case BFD_RELOC_ARM_LDR_SB_G0:
22786     case BFD_RELOC_ARM_LDR_SB_G1:
22787     case BFD_RELOC_ARM_LDR_SB_G2:
22788     case BFD_RELOC_ARM_LDRS_SB_G0:
22789     case BFD_RELOC_ARM_LDRS_SB_G1:
22790     case BFD_RELOC_ARM_LDRS_SB_G2:
22791     case BFD_RELOC_ARM_LDC_SB_G0:
22792     case BFD_RELOC_ARM_LDC_SB_G1:
22793     case BFD_RELOC_ARM_LDC_SB_G2:
22794     case BFD_RELOC_ARM_V4BX:
22795       code = fixp->fx_r_type;
22796       break;
22797 
22798     case BFD_RELOC_ARM_TLS_GOTDESC:
22799     case BFD_RELOC_ARM_TLS_GD32:
22800     case BFD_RELOC_ARM_TLS_IE32:
22801     case BFD_RELOC_ARM_TLS_LDM32:
22802       /* BFD will include the symbol's address in the addend.
22803 	 But we don't want that, so subtract it out again here.  */
22804       if (!S_IS_COMMON (fixp->fx_addsy))
22805 	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
22806       code = fixp->fx_r_type;
22807       break;
22808 #endif
22809 
22810     case BFD_RELOC_ARM_IMMEDIATE:
22811       as_bad_where (fixp->fx_file, fixp->fx_line,
22812 		    _("internal relocation (type: IMMEDIATE) not fixed up"));
22813       return NULL;
22814 
22815     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22816       as_bad_where (fixp->fx_file, fixp->fx_line,
22817 		    _("ADRL used for a symbol not defined in the same file"));
22818       return NULL;
22819 
22820     case BFD_RELOC_ARM_OFFSET_IMM:
22821       if (section->use_rela_p)
22822 	{
22823 	  code = fixp->fx_r_type;
22824 	  break;
22825 	}
22826 
22827       if (fixp->fx_addsy != NULL
22828 	  && !S_IS_DEFINED (fixp->fx_addsy)
22829 	  && S_IS_LOCAL (fixp->fx_addsy))
22830 	{
22831 	  as_bad_where (fixp->fx_file, fixp->fx_line,
22832 			_("undefined local label `%s'"),
22833 			S_GET_NAME (fixp->fx_addsy));
22834 	  return NULL;
22835 	}
22836 
22837       as_bad_where (fixp->fx_file, fixp->fx_line,
22838 		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
22839       return NULL;
22840 
22841     default:
22842       {
22843 	char * type;
22844 
22845 	switch (fixp->fx_r_type)
22846 	  {
22847 	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
22848 	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
22849 	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
22850 	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
22851 	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
22852 	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
22853 	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
22854 	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
22855 	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
22856 	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
22857 	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
22858 	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
22859 	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
22860 	  default:			   type = _("<unknown>"); break;
22861 	  }
22862 	as_bad_where (fixp->fx_file, fixp->fx_line,
22863 		      _("cannot represent %s relocation in this object file format"),
22864 		      type);
22865 	return NULL;
22866       }
22867     }
22868 
22869 #ifdef OBJ_ELF
22870   if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
22871       && GOT_symbol
22872       && fixp->fx_addsy == GOT_symbol)
22873     {
22874       code = BFD_RELOC_ARM_GOTPC;
22875       reloc->addend = fixp->fx_offset = reloc->address;
22876     }
22877 #endif
22878 
22879   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
22880 
22881   if (reloc->howto == NULL)
22882     {
22883       as_bad_where (fixp->fx_file, fixp->fx_line,
22884 		    _("cannot represent %s relocation in this object file format"),
22885 		    bfd_get_reloc_code_name (code));
22886       return NULL;
22887     }
22888 
22889   /* HACK: Since arm ELF uses Rel instead of Rela, encode the
22890      vtable entry to be used in the relocation's section offset.  */
22891   if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22892     reloc->address = fixp->fx_offset;
22893 
22894   return reloc;
22895 }
22896 
22897 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
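/* For example, a ".word sym" emitted by cons (size 4) becomes a BFD_RELOC_32
   fix against SYM, ".short"/".2byte" data becomes BFD_RELOC_16, and ".quad"
   data becomes BFD_RELOC_64; sizes with no direct mapping fall back to
   BFD_RELOC_32.  */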
22898 
22899 void
22900 cons_fix_new_arm (fragS *	frag,
22901 		  int		where,
22902 		  int		size,
22903 		  expressionS * exp)
22904 {
22905   bfd_reloc_code_real_type type;
22906   int pcrel = 0;
22907 
22908   /* Pick a reloc.
22909      FIXME: @@ Should look at CPU word size.  */
22910   switch (size)
22911     {
22912     case 1:
22913       type = BFD_RELOC_8;
22914       break;
22915     case 2:
22916       type = BFD_RELOC_16;
22917       break;
22918     case 4:
22919     default:
22920       type = BFD_RELOC_32;
22921       break;
22922     case 8:
22923       type = BFD_RELOC_64;
22924       break;
22925     }
22926 
22927 #ifdef TE_PE
22928   if (exp->X_op == O_secrel)
22929   {
22930     exp->X_op = O_symbol;
22931     type = BFD_RELOC_32_SECREL;
22932   }
22933 #endif
22934 
22935   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22936 }
22937 
22938 #if defined (OBJ_COFF)
22939 void
22940 arm_validate_fix (fixS * fixP)
22941 {
22942   /* If the destination of the branch is a defined symbol which does not have
22943      the THUMB_FUNC attribute, then we must be calling a function which has
22944      the (interfacearm) attribute.  We look for the Thumb entry point to that
22945      function and change the branch to refer to that function instead.	*/
22946   if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22947       && fixP->fx_addsy != NULL
22948       && S_IS_DEFINED (fixP->fx_addsy)
22949       && ! THUMB_IS_FUNC (fixP->fx_addsy))
22950     {
22951       fixP->fx_addsy = find_real_start (fixP->fx_addsy);
22952     }
22953 }
22954 #endif
22955 
22956 
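/* Return nonzero if the fix described by FIXP must be left as a relocation
   for the linker rather than resolved by the assembler.  For example, a
   Thumb branch/BLX to a symbol known to be an ARM-mode function (or an ARM
   branch to a Thumb function) is kept so that the linker can sort out the
   interworking, typically via a stub or by converting the call to BLX.  */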
22957 int
22958 arm_force_relocation (struct fix * fixp)
22959 {
22960 #if defined (OBJ_COFF) && defined (TE_PE)
22961   if (fixp->fx_r_type == BFD_RELOC_RVA)
22962     return 1;
22963 #endif
22964 
22965   /* If we have a call or a branch to a function in ARM ISA mode from
22966      a Thumb function, or vice versa, force the relocation.  On cores
22967      that have BLX, the linker may later discard these relocations when
22968      simple transformations are possible.  */
22969 
22970 #ifdef OBJ_ELF
22971   switch (fixp->fx_r_type)
22972     {
22973     case BFD_RELOC_ARM_PCREL_JUMP:
22974     case BFD_RELOC_ARM_PCREL_CALL:
22975     case BFD_RELOC_THUMB_PCREL_BLX:
22976       if (THUMB_IS_FUNC (fixp->fx_addsy))
22977 	return 1;
22978       break;
22979 
22980     case BFD_RELOC_ARM_PCREL_BLX:
22981     case BFD_RELOC_THUMB_PCREL_BRANCH25:
22982     case BFD_RELOC_THUMB_PCREL_BRANCH20:
22983     case BFD_RELOC_THUMB_PCREL_BRANCH23:
22984       if (ARM_IS_FUNC (fixp->fx_addsy))
22985 	return 1;
22986       break;
22987 
22988     default:
22989       break;
22990     }
22991 #endif
22992 
22993   /* Resolve these relocations even if the symbol is extern or weak.
22994      Technically this is probably wrong due to symbol preemption.
22995      In practice these relocations do not have enough range to be useful
22996      at dynamic link time, and some code (e.g. in the Linux kernel)
22997      expects these references to be resolved.  */
22998   if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22999       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23000       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23001       || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23002       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23003       || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23004       || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23005       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23006       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23007       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23008       || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23009       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23010       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23011       || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23012     return 0;
23013 
23014   /* Always leave these relocations for the linker.  */
23015   if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23016        && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23017       || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23018     return 1;
23019 
23020   /* Always generate relocations against function symbols.  */
23021   if (fixp->fx_r_type == BFD_RELOC_32
23022       && fixp->fx_addsy
23023       && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23024     return 1;
23025 
23026   return generic_force_reloc (fixp);
23027 }
23028 
23029 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23030 /* Relocations against function names must be left unadjusted,
23031    so that the linker can use this information to generate interworking
23032    stubs.  The MIPS version of this function
23033    also prevents relocations that are mips-16 specific, but I do not
23034    know why it does this.
23035 
23036    FIXME:
23037    There is one other problem that ought to be addressed here, but
23038    which currently is not:  Taking the address of a label (rather
23039    than a function) and then later jumping to that address.  Such
23040    addresses also ought to have their bottom bit set (assuming that
23041    they reside in Thumb code), but at the moment they will not.	 */
23042 
23043 bfd_boolean
23044 arm_fix_adjustable (fixS * fixP)
23045 {
23046   if (fixP->fx_addsy == NULL)
23047     return 1;
23048 
23049   /* Preserve relocations against symbols with function type.  */
23050   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23051     return FALSE;
23052 
23053   if (THUMB_IS_FUNC (fixP->fx_addsy)
23054       && fixP->fx_subsy == NULL)
23055     return FALSE;
23056 
23057   /* We need the symbol name for the VTABLE entries.  */
23058   if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23059       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23060     return FALSE;
23061 
23062   /* Don't allow symbols to be discarded on GOT related relocs.	 */
23063   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23064       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23065       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23066       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23067       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23068       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23069       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23070       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23071       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23072       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23073       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23074       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23075       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23076       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23077     return FALSE;
23078 
23079   /* Similarly for group relocations.  */
23080   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23081        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23082       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23083     return FALSE;
23084 
23085   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
23086   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23087       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23088       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23089       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23090       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23091       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23092       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23093       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23094     return FALSE;
23095 
23096   return TRUE;
23097 }
23098 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23099 
23100 #ifdef OBJ_ELF
23101 
23102 const char *
23103 elf32_arm_target_format (void)
23104 {
23105 #ifdef TE_SYMBIAN
23106   return (target_big_endian
23107 	  ? "elf32-bigarm-symbian"
23108 	  : "elf32-littlearm-symbian");
23109 #elif defined (TE_VXWORKS)
23110   return (target_big_endian
23111 	  ? "elf32-bigarm-vxworks"
23112 	  : "elf32-littlearm-vxworks");
23113 #elif defined (TE_NACL)
23114   return (target_big_endian
23115 	  ? "elf32-bigarm-nacl"
23116 	  : "elf32-littlearm-nacl");
23117 #else
23118   if (target_big_endian)
23119     return "elf32-bigarm";
23120   else
23121     return "elf32-littlearm";
23122 #endif
23123 }
23124 
23125 void
23126 armelf_frob_symbol (symbolS * symp,
23127 		    int *     puntp)
23128 {
23129   elf_frob_symbol (symp, puntp);
23130 }
23131 #endif
23132 
23133 /* MD interface: Finalization.	*/
23134 
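/* Flush any literal pools that are still pending at the end of assembly.
   For example, a hypothetical "ldr r0, =0x12345678" with no .ltorg before
   the end of the file still needs its constant emitted; the s_ltorg call
   below dumps each remaining pool at the end of its section.  */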
23135 void
23136 arm_cleanup (void)
23137 {
23138   literal_pool * pool;
23139 
23140   /* Ensure that all the IT blocks are properly closed.  */
23141   check_it_blocks_finished ();
23142 
23143   for (pool = list_of_pools; pool; pool = pool->next)
23144     {
23145       /* Put it at the end of the relevant section.  */
23146       subseg_set (pool->section, pool->sub_section);
23147 #ifdef OBJ_ELF
23148       arm_elf_change_section ();
23149 #endif
23150       s_ltorg (0);
23151     }
23152 }
23153 
23154 #ifdef OBJ_ELF
23155 /* Remove any excess mapping symbols generated for alignment frags in
23156    SEC.  We may have created a mapping symbol before a zero byte
23157    alignment; remove it if there's a mapping symbol after the
23158    alignment.  */
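/* For instance, a ".align" that turns out to add no bytes between two code
   or data regions can leave a mapping symbol (e.g. "$a", "$t" or "$d") at
   the same address as the one starting the following frag; the earlier,
   now-redundant symbol is discarded below.  */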
23159 static void
23160 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
23161 		       void *dummy ATTRIBUTE_UNUSED)
23162 {
23163   segment_info_type *seginfo = seg_info (sec);
23164   fragS *fragp;
23165 
23166   if (seginfo == NULL || seginfo->frchainP == NULL)
23167     return;
23168 
23169   for (fragp = seginfo->frchainP->frch_root;
23170        fragp != NULL;
23171        fragp = fragp->fr_next)
23172     {
23173       symbolS *sym = fragp->tc_frag_data.last_map;
23174       fragS *next = fragp->fr_next;
23175 
23176       /* Variable-sized frags have been converted to fixed size by
23177 	 this point.  But if this was variable-sized to start with,
23178 	 there will be a fixed-size frag after it.  So don't handle
23179 	 next == NULL.  */
23180       if (sym == NULL || next == NULL)
23181 	continue;
23182 
23183       if (S_GET_VALUE (sym) < next->fr_address)
23184 	/* Not at the end of this frag.  */
23185 	continue;
23186       know (S_GET_VALUE (sym) == next->fr_address);
23187 
23188       do
23189 	{
23190 	  if (next->tc_frag_data.first_map != NULL)
23191 	    {
23192 	      /* Next frag starts with a mapping symbol.  Discard this
23193 		 one.  */
23194 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23195 	      break;
23196 	    }
23197 
23198 	  if (next->fr_next == NULL)
23199 	    {
23200 	      /* This mapping symbol is at the end of the section.  Discard
23201 		 it.  */
23202 	      know (next->fr_fix == 0 && next->fr_var == 0);
23203 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23204 	      break;
23205 	    }
23206 
23207 	  /* As long as we have empty frags without any mapping symbols,
23208 	     keep looking.  */
23209 	  /* If the next frag is non-empty and does not start with a
23210 	     mapping symbol, then this mapping symbol is required.  */
23211 	  if (next->fr_address != next->fr_next->fr_address)
23212 	    break;
23213 
23214 	  next = next->fr_next;
23215 	}
23216       while (next != NULL);
23217     }
23218 }
23219 #endif
23220 
23221 /* Adjust the symbol table.  This marks Thumb symbols as distinct from
23222    ARM ones.  */
23223 
23224 void
23225 arm_adjust_symtab (void)
23226 {
23227 #ifdef OBJ_COFF
23228   symbolS * sym;
23229 
23230   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23231     {
23232       if (ARM_IS_THUMB (sym))
23233 	{
23234 	  if (THUMB_IS_FUNC (sym))
23235 	    {
23236 	      /* Mark the symbol as a Thumb function.  */
23237 	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
23238 		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
23239 		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
23240 
23241 	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
23242 		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
23243 	      else
23244 		as_bad (_("%s: unexpected function type: %d"),
23245 			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
23246 	    }
23247 	  else switch (S_GET_STORAGE_CLASS (sym))
23248 	    {
23249 	    case C_EXT:
23250 	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
23251 	      break;
23252 	    case C_STAT:
23253 	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
23254 	      break;
23255 	    case C_LABEL:
23256 	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
23257 	      break;
23258 	    default:
23259 	      /* Do nothing.  */
23260 	      break;
23261 	    }
23262 	}
23263 
23264       if (ARM_IS_INTERWORK (sym))
23265 	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
23266     }
23267 #endif
23268 #ifdef OBJ_ELF
23269   symbolS * sym;
23270   char	    bind;
23271 
23272   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23273     {
23274       if (ARM_IS_THUMB (sym))
23275 	{
23276 	  elf_symbol_type * elf_sym;
23277 
23278 	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
23279 	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
23280 
23281 	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
23282 		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
23283 	    {
23284 	      /* If it's a .thumb_func, declare it as so,
23285 		 otherwise tag label as .code 16.  */
23286 	      if (THUMB_IS_FUNC (sym))
23287 		elf_sym->internal_elf_sym.st_target_internal
23288 		  = ST_BRANCH_TO_THUMB;
23289 	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23290 		elf_sym->internal_elf_sym.st_info =
23291 		  ELF_ST_INFO (bind, STT_ARM_16BIT);
23292 	    }
23293 	}
23294     }
23295 
23296   /* Remove any overlapping mapping symbols generated by alignment frags.  */
23297   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
23298   /* Now do generic ELF adjustments.  */
23299   elf_adjust_symtab ();
23300 #endif
23301 }
23302 
23303 /* MD interface: Initialization.  */
23304 
23305 static void
23306 set_constant_flonums (void)
23307 {
23308   int i;
23309 
23310   for (i = 0; i < NUM_FLOAT_VALS; i++)
23311     if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23312       abort ();
23313 }
23314 
23315 /* Auto-select Thumb mode if it's the only available instruction set for the
23316    given architecture.  */
23317 
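/* For example, the v6-M and v7-M (Cortex-M) architectures provide no ARM
   (32-bit) instruction set at all, so selecting them implies assembling in
   Thumb mode from the start.  */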
23318 static void
23319 autoselect_thumb_from_cpu_variant (void)
23320 {
23321   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23322     opcode_select (16);
23323 }
23324 
23325 void
23326 md_begin (void)
23327 {
23328   unsigned mach;
23329   unsigned int i;
23330 
23331   if (	 (arm_ops_hsh = hash_new ()) == NULL
23332       || (arm_cond_hsh = hash_new ()) == NULL
23333       || (arm_shift_hsh = hash_new ()) == NULL
23334       || (arm_psr_hsh = hash_new ()) == NULL
23335       || (arm_v7m_psr_hsh = hash_new ()) == NULL
23336       || (arm_reg_hsh = hash_new ()) == NULL
23337       || (arm_reloc_hsh = hash_new ()) == NULL
23338       || (arm_barrier_opt_hsh = hash_new ()) == NULL)
23339     as_fatal (_("virtual memory exhausted"));
23340 
23341   for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
23342     hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
23343   for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
23344     hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
23345   for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
23346     hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
23347   for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
23348     hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
23349   for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
23350     hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
23351                  (void *) (v7m_psrs + i));
23352   for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
23353     hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
23354   for (i = 0;
23355        i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
23356        i++)
23357     hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
23358 		 (void *) (barrier_opt_names + i));
23359 #ifdef OBJ_ELF
23360   for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
23361     {
23362       struct reloc_entry * entry = reloc_names + i;
23363 
23364       if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
23365 	/* This makes encode_branch() use the EABI versions of this relocation.  */
23366 	entry->reloc = BFD_RELOC_UNUSED;
23367 
23368       hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
23369     }
23370 #endif
23371 
23372   set_constant_flonums ();
23373 
23374   /* Set the cpu variant based on the command-line options.  We prefer
23375      -mcpu= over -march= if both are set (as for GCC); and we prefer
23376      -mfpu= over any other way of setting the floating point unit.
23377      Mixing legacy and new-style options is reported as an error.  */
23378   if (legacy_cpu)
23379     {
23380       if (mcpu_cpu_opt || march_cpu_opt)
23381 	as_bad (_("use of old and new-style options to set CPU type"));
23382 
23383       mcpu_cpu_opt = legacy_cpu;
23384     }
23385   else if (!mcpu_cpu_opt)
23386     mcpu_cpu_opt = march_cpu_opt;
23387 
23388   if (legacy_fpu)
23389     {
23390       if (mfpu_opt)
23391 	as_bad (_("use of old and new-style options to set FPU type"));
23392 
23393       mfpu_opt = legacy_fpu;
23394     }
23395   else if (!mfpu_opt)
23396     {
23397 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
23398 	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
23399       /* Some environments specify a default FPU.  If they don't, infer it
23400 	 from the processor.  */
23401       if (mcpu_fpu_opt)
23402 	mfpu_opt = mcpu_fpu_opt;
23403       else
23404 	mfpu_opt = march_fpu_opt;
23405 #else
23406       mfpu_opt = &fpu_default;
23407 #endif
23408     }
23409 
23410   if (!mfpu_opt)
23411     {
23412       if (mcpu_cpu_opt != NULL)
23413 	mfpu_opt = &fpu_default;
23414       else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
23415 	mfpu_opt = &fpu_arch_vfp_v2;
23416       else
23417 	mfpu_opt = &fpu_arch_fpa;
23418     }
23419 
23420 #ifdef CPU_DEFAULT
23421   if (!mcpu_cpu_opt)
23422     {
23423       mcpu_cpu_opt = &cpu_default;
23424       selected_cpu = cpu_default;
23425     }
23426 #else
23427   if (mcpu_cpu_opt)
23428     selected_cpu = *mcpu_cpu_opt;
23429   else
23430     mcpu_cpu_opt = &arm_arch_any;
23431 #endif
23432 
23433   ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23434 
23435   autoselect_thumb_from_cpu_variant ();
23436 
23437   arm_arch_used = thumb_arch_used = arm_arch_none;
23438 
23439 #if defined OBJ_COFF || defined OBJ_ELF
23440   {
23441     unsigned int flags = 0;
23442 
23443 #if defined OBJ_ELF
23444     flags = meabi_flags;
23445 
23446     switch (meabi_flags)
23447       {
23448       case EF_ARM_EABI_UNKNOWN:
23449 #endif
23450 	/* Set the flags in the private structure.  */
23451 	if (uses_apcs_26)      flags |= F_APCS26;
23452 	if (support_interwork) flags |= F_INTERWORK;
23453 	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
23454 	if (pic_code)	       flags |= F_PIC;
23455 	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
23456 	  flags |= F_SOFT_FLOAT;
23457 
23458 	switch (mfloat_abi_opt)
23459 	  {
23460 	  case ARM_FLOAT_ABI_SOFT:
23461 	  case ARM_FLOAT_ABI_SOFTFP:
23462 	    flags |= F_SOFT_FLOAT;
23463 	    break;
23464 
23465 	  case ARM_FLOAT_ABI_HARD:
23466 	    if (flags & F_SOFT_FLOAT)
23467 	      as_bad (_("hard-float conflicts with specified fpu"));
23468 	    break;
23469 	  }
23470 
23471 	/* Using pure-endian doubles (even if soft-float).	*/
23472 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
23473 	  flags |= F_VFP_FLOAT;
23474 
23475 #if defined OBJ_ELF
23476 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
23477 	    flags |= EF_ARM_MAVERICK_FLOAT;
23478 	break;
23479 
23480       case EF_ARM_EABI_VER4:
23481       case EF_ARM_EABI_VER5:
23482 	/* No additional flags to set.	*/
23483 	break;
23484 
23485       default:
23486 	abort ();
23487       }
23488 #endif
23489     bfd_set_private_flags (stdoutput, flags);
23490 
23491     /* We have run out of flags in the COFF header to encode the
23492        status of ATPCS support, so instead we create a dummy,
23493        empty, debug section called .arm.atpcs.	*/
23494     if (atpcs)
23495       {
23496 	asection * sec;
23497 
23498 	sec = bfd_make_section (stdoutput, ".arm.atpcs");
23499 
23500 	if (sec != NULL)
23501 	  {
23502 	    bfd_set_section_flags
23503 	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
23504 	    bfd_set_section_size (stdoutput, sec, 0);
23505 	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
23506 	  }
23507       }
23508   }
23509 #endif
23510 
23511   /* Record the CPU type as well.  */
23512   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
23513     mach = bfd_mach_arm_iWMMXt2;
23514   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
23515     mach = bfd_mach_arm_iWMMXt;
23516   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
23517     mach = bfd_mach_arm_XScale;
23518   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
23519     mach = bfd_mach_arm_ep9312;
23520   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
23521     mach = bfd_mach_arm_5TE;
23522   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
23523     {
23524       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23525 	mach = bfd_mach_arm_5T;
23526       else
23527 	mach = bfd_mach_arm_5;
23528     }
23529   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
23530     {
23531       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23532 	mach = bfd_mach_arm_4T;
23533       else
23534 	mach = bfd_mach_arm_4;
23535     }
23536   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
23537     mach = bfd_mach_arm_3M;
23538   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
23539     mach = bfd_mach_arm_3;
23540   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
23541     mach = bfd_mach_arm_2a;
23542   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
23543     mach = bfd_mach_arm_2;
23544   else
23545     mach = bfd_mach_arm_unknown;
23546 
23547   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
23548 }
23549 
23550 /* Command line processing.  */
23551 
23552 /* md_parse_option
23553       Invocation line includes a switch not recognized by the base assembler.
23554       See if it's a processor-specific option.
23555 
23556       This routine is somewhat complicated by the need for backwards
23557       compatibility (since older releases of gcc can't be changed).
23558       The new options try to make the interface as compatible as
23559       possible with GCC.
23560 
23561       New options (supported) are:
23562 
23563 	      -mcpu=<cpu name>		 Assemble for selected processor
23564 	      -march=<architecture name> Assemble for selected architecture
23565 	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
23566 	      -EB/-mbig-endian		 Big-endian
23567 	      -EL/-mlittle-endian	 Little-endian
23568 	      -k			 Generate PIC code
23569 	      -mthumb			 Start in Thumb mode
23570 	      -mthumb-interwork		 Code supports ARM/Thumb interworking
23571 
23572 	      -m[no-]warn-deprecated     Warn about deprecated features
23573 
23574       For now we will also provide support for:
23575 
23576 	      -mapcs-32			 32-bit Program counter
23577 	      -mapcs-26			 26-bit Program counter
23578 	      -mapcs-float		 Floats passed in FP registers
23579 	      -mapcs-reentrant		 Reentrant code
23580 	      -matpcs
23581       (sometime these will probably be replaced with -mapcs=<list of options>
23582       and -matpcs=<list of options>)
23583 
23584       The remaining options are only supported for backwards compatibility.
23585       Cpu variants, the arm part is optional:
23586 	      -m[arm]1		      Currently not supported.
23587 	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
23588 	      -m[arm]3		      Arm 3 processor
23589 	      -m[arm]6[xx],	      Arm 6 processors
23590 	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
23591 	      -m[arm]8[10]	      Arm 8 processors
23592 	      -m[arm]9[20][tdmi]      Arm 9 processors
23593 	      -mstrongarm[110[0]]     StrongARM processors
23594 	      -mxscale		      XScale processors
23595 	      -m[arm]v[2345[t[e]]]    Arm architectures
23596 	      -mall		      All (except the ARM1)
23597       FP variants:
23598 	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
23599 	      -mfpe-old		      (No float load/store multiples)
23600 	      -mvfpxd		      VFP Single precision
23601 	      -mvfp		      All VFP
23602 	      -mno-fpu		      Disable all floating point instructions
23603 
23604       The following CPU names are recognized:
23605 	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
23606 	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
23607 	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
23608 	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
23609 	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
23610 	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
23611 	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
23612 
23613       */
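/* For instance, a typical new-style invocation (file names hypothetical)
   would be:

       as -mcpu=cortex-a9 -mfpu=neon -mthumb -EL -o foo.o foo.s

   whereas the legacy spellings map onto the new ones, e.g. -marm7tdmi
   selects the same architecture as -mcpu=arm7tdmi.  */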
23614 
23615 const char * md_shortopts = "m:k";
23616 
23617 #ifdef ARM_BI_ENDIAN
23618 #define OPTION_EB (OPTION_MD_BASE + 0)
23619 #define OPTION_EL (OPTION_MD_BASE + 1)
23620 #else
23621 #if TARGET_BYTES_BIG_ENDIAN
23622 #define OPTION_EB (OPTION_MD_BASE + 0)
23623 #else
23624 #define OPTION_EL (OPTION_MD_BASE + 1)
23625 #endif
23626 #endif
23627 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
23628 
23629 struct option md_longopts[] =
23630 {
23631 #ifdef OPTION_EB
23632   {"EB", no_argument, NULL, OPTION_EB},
23633 #endif
23634 #ifdef OPTION_EL
23635   {"EL", no_argument, NULL, OPTION_EL},
23636 #endif
23637   {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
23638   {NULL, no_argument, NULL, 0}
23639 };
23640 
23641 size_t md_longopts_size = sizeof (md_longopts);
23642 
23643 struct arm_option_table
23644 {
23645   char *option;		/* Option name to match.  */
23646   char *help;		/* Help information.  */
23647   int  *var;		/* Variable to change.	*/
23648   int	value;		/* What to change it to.  */
23649   char *deprecated;	/* If non-null, print this message.  */
23650 };
23651 
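/* Each entry records an int to set, and the value to store in it, when the
   corresponding option is seen; for example a (hypothetical) command line
   "as -mthumb -mbig-endian foo.s" matches the "mthumb" and "mbig-endian"
   entries below and sets thumb_mode and target_big_endian to 1.  */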
23652 struct arm_option_table arm_opts[] =
23653 {
23654   {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
23655   {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
23656   {"mthumb-interwork", N_("support ARM/Thumb interworking"),
23657    &support_interwork, 1, NULL},
23658   {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
23659   {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
23660   {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
23661    1, NULL},
23662   {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
23663   {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
23664   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
23665   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
23666    NULL},
23667 
23668   /* These are recognized by the assembler, but have no effect on code.	 */
23669   {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
23670   {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
23671 
23672   {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
23673   {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
23674    &warn_on_deprecated, 0, NULL},
23675   {NULL, NULL, NULL, 0, NULL}
23676 };
23677 
23678 struct arm_legacy_option_table
23679 {
23680   char *option;				/* Option name to match.  */
23681   const arm_feature_set	**var;		/* Variable to change.	*/
23682   const arm_feature_set	value;		/* What to change it to.  */
23683   char *deprecated;			/* If non-null, print this message.  */
23684 };
23685 
23686 const struct arm_legacy_option_table arm_legacy_opts[] =
23687 {
23688   /* DON'T add any new processors to this list -- we want the whole list
23689      to go away...  Add them to the processors table instead.  */
23690   {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
23691   {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
23692   {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
23693   {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
23694   {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23695   {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23696   {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23697   {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23698   {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
23699   {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
23700   {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
23701   {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
23702   {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
23703   {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
23704   {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
23705   {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
23706   {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
23707   {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
23708   {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
23709   {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
23710   {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
23711   {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
23712   {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
23713   {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
23714   {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
23715   {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
23716   {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
23717   {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
23718   {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
23719   {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
23720   {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
23721   {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
23722   {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
23723   {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
23724   {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23725   {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23726   {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23727   {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23728   {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23729   {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23730   {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
23731   {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
23732   {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
23733   {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
23734   {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
23735   {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
23736   {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23737   {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23738   {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23739   {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23740   {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23741   {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23742   {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23743   {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23744   {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23745   {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23746   {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
23747   {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
23748   {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
23749   {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
23750   {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23751   {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23752   {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23753   {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23754   {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23755   {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23756   {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23757   {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23758   {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
23759   {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
23760    N_("use -mcpu=strongarm110")},
23761   {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
23762    N_("use -mcpu=strongarm1100")},
23763   {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
23764    N_("use -mcpu=strongarm1110")},
23765   {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
23766   {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
23767   {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
23768 
23769   /* Architecture variants -- don't add any more to this list either.  */
23770   {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
23771   {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
23772   {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23773   {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23774   {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
23775   {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
23776   {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23777   {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23778   {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
23779   {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
23780   {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23781   {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23782   {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
23783   {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
23784   {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23785   {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23786   {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23787   {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23788 
23789   /* Floating point variants -- don't add any more to this list either.	 */
23790   {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
23791   {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
23792   {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
23793   {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
23794    N_("use either -mfpu=softfpa or -mfpu=softvfp")},
23795 
23796   {NULL, NULL, ARM_ARCH_NONE, NULL}
23797 };
23798 
23799 struct arm_cpu_option_table
23800 {
23801   char *name;
23802   size_t name_len;
23803   const arm_feature_set	value;
23804   /* For some CPUs we assume an FPU unless the user explicitly sets
23805      -mfpu=...	*/
23806   const arm_feature_set	default_fpu;
23807   /* The canonical name of the CPU, or NULL to use NAME converted to upper
23808      case.  */
23809   const char *canonical_name;
23810 };
23811 
23812 /* This list should, at a minimum, contain all the cpu names
23813    recognized by GCC.  */
23814 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
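/* The macro just fills in the name length; for instance
   ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL) is equivalent
   to { "arm7tdmi", 8, ARM_ARCH_V4T, FPU_ARCH_FPA, NULL }.  */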
23815 static const struct arm_cpu_option_table arm_cpus[] =
23816 {
23817   ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
23818   ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
23819   ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
23820   ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
23821   ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
23822   ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23823   ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23824   ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23825   ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23826   ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23827   ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23828   ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
23829   ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23830   ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
23831   ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23832   ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
23833   ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23834   ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23835   ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23836   ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23837   ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23838   ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23839   ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23840   ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23841   ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23842   ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23843   ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23844   ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
23845   ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23846   ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23847   ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23848   ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23849   ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23850   ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23851   ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23852   ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23853   ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23854   ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
23855   ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23856   ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
23857   ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23858   ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23859   ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
23860   ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
23861   ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
23862   ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
23863   /* For V5 or later processors we default to using VFP; but the user
23864      should really set the FPU type explicitly.	 */
23865   ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23866   ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23867   ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23868   ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23869   ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
23870   ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23871   ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
23872   ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23873   ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23874   ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
23875   ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23876   ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23877   ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
23878   ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
23879   ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23880   ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
23881   ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
23882   ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23883   ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23884   ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
23885 								 "ARM1026EJ-S"),
23886   ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
23887   ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23888   ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
23889   ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
23890   ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
23891   ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
23892   ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
23893   ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
23894   ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
23895 								 "ARM1136JF-S"),
23896   ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
23897   ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
23898   ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
23899   ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
23900   ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
23901   ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL),
23902   ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL),
23903   ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
23904 						 FPU_NONE,	  "Cortex-A5"),
23905   ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23906 						 FPU_ARCH_NEON_VFP_V4,
23907 								  "Cortex-A7"),
23908   ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
23909 						 ARM_FEATURE (0, FPU_VFP_V3
23910                                                         | FPU_NEON_EXT_V1),
23911 								  "Cortex-A8"),
23912   ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
23913 						 ARM_FEATURE (0, FPU_VFP_V3
23914                                                         | FPU_NEON_EXT_V1),
23915 								  "Cortex-A9"),
23916   ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23917 						 FPU_ARCH_NEON_VFP_V4,
23918 								  "Cortex-A15"),
23919   ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
23920   ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
23921 								  "Cortex-R4F"),
23922   ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
23923 						 FPU_NONE,	  "Cortex-R5"),
23924   ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
23925 						 FPU_ARCH_VFP_V3D16,
23926 								  "Cortex-R7"),
23927   ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
23928   ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
23929   ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
23930   ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
23931   ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
23932   /* ??? XSCALE is really an architecture.  */
23933   ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23934   /* ??? iwmmxt is not a processor.  */
23935   ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
23936   ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
23937   ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23938   /* Maverick */
23939   ARM_CPU_OPT ("ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
23940 						 FPU_ARCH_MAVERICK,
23941 								  "ARM920T"),
23942   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
23943 };
23944 #undef ARM_CPU_OPT
23945 
23946 struct arm_arch_option_table
23947 {
23948   char *name;
23949   size_t name_len;
23950   const arm_feature_set	value;
23951   const arm_feature_set	default_fpu;
23952 };
23953 
23954 /* This list should, at a minimum, contain all the architecture names
23955    recognized by GCC.  */
23956 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
23957 static const struct arm_arch_option_table arm_archs[] =
23958 {
23959   ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
23960   ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
23961   ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
23962   ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
23963   ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
23964   ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
23965   ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
23966   ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
23967   ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
23968   ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
23969   ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
23970   ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
23971   ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
23972   ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
23973   ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
23974   ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
23975   ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
23976   ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
23977   ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
23978   ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
23979   ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
23980   ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
23981   ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
23982   ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
23983   ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
23984   ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
23985   ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
23986   ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
23987   ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
23988   /* The official spelling of the ARMv7 profile variants is the dashed form.
23989      Accept the non-dashed form for compatibility with old toolchains.  */
23990   ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
23991   ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
23992   ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
23993   ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
23994   ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
23995   ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
23996   ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
23997   ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
23998   ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
23999   ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
24000   ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
24001   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24002 };
24003 #undef ARM_ARCH_OPT
24004 
24005 /* ISA extensions in the co-processor and main instruction set space.  */
24006 struct arm_option_extension_value_table
24007 {
24008   char *name;
24009   size_t name_len;
24010   const arm_feature_set value;
24011   const arm_feature_set allowed_archs;
24012 };
24013 
24014 /* The following table must be in alphabetical order, with a NULL
24015    last entry.  */
24016 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24017 static const struct arm_option_extension_value_table arm_extensions[] =
24018 {
24019   ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24020 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24021   ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8,
24022 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24023   ARM_EXT_OPT ("idiv",	ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24024 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24025   ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT),	ARM_ANY),
24026   ARM_EXT_OPT ("iwmmxt2",
24027 			ARM_FEATURE (0, ARM_CEXT_IWMMXT2),	ARM_ANY),
24028   ARM_EXT_OPT ("maverick",
24029                         ARM_FEATURE (0, ARM_CEXT_MAVERICK),	ARM_ANY),
24030   ARM_EXT_OPT ("mp",	ARM_FEATURE (ARM_EXT_MP, 0),
24031 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24032   ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
24033 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24034   ARM_EXT_OPT ("os",	ARM_FEATURE (ARM_EXT_OS, 0),
24035 				   ARM_FEATURE (ARM_EXT_V6M, 0)),
24036   ARM_EXT_OPT ("sec",	ARM_FEATURE (ARM_EXT_SEC, 0),
24037 				   ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24038   ARM_EXT_OPT ("virt",	ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24039 				     | ARM_EXT_DIV, 0),
24040 				   ARM_FEATURE (ARM_EXT_V7A, 0)),
24041   ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE),	ARM_ANY),
24042   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24043 };
24044 #undef ARM_EXT_OPT
24045 
24046 /* ISA floating-point and Advanced SIMD extensions.  */
24047 struct arm_option_fpu_value_table
24048 {
24049   char *name;
24050   const arm_feature_set value;
24051 };
24052 
24053 /* This list should, at a minimum, contain all the fpu names
24054    recognized by GCC.  */
24055 static const struct arm_option_fpu_value_table arm_fpus[] =
24056 {
24057   {"softfpa",		FPU_NONE},
24058   {"fpe",		FPU_ARCH_FPE},
24059   {"fpe2",		FPU_ARCH_FPE},
24060   {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
24061   {"fpa",		FPU_ARCH_FPA},
24062   {"fpa10",		FPU_ARCH_FPA},
24063   {"fpa11",		FPU_ARCH_FPA},
24064   {"arm7500fe",		FPU_ARCH_FPA},
24065   {"softvfp",		FPU_ARCH_VFP},
24066   {"softvfp+vfp",	FPU_ARCH_VFP_V2},
24067   {"vfp",		FPU_ARCH_VFP_V2},
24068   {"vfp9",		FPU_ARCH_VFP_V2},
24069   {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
24070   {"vfp10",		FPU_ARCH_VFP_V2},
24071   {"vfp10-r0",		FPU_ARCH_VFP_V1},
24072   {"vfpxd",		FPU_ARCH_VFP_V1xD},
24073   {"vfpv2",		FPU_ARCH_VFP_V2},
24074   {"vfpv3",		FPU_ARCH_VFP_V3},
24075   {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
24076   {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
24077   {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
24078   {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
24079   {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
24080   {"arm1020t",		FPU_ARCH_VFP_V1},
24081   {"arm1020e",		FPU_ARCH_VFP_V2},
24082   {"arm1136jfs",	FPU_ARCH_VFP_V2},
24083   {"arm1136jf-s",	FPU_ARCH_VFP_V2},
24084   {"maverick",		FPU_ARCH_MAVERICK},
24085   {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
24086   {"neon-fp16",		FPU_ARCH_NEON_FP16},
24087   {"vfpv4",		FPU_ARCH_VFP_V4},
24088   {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
24089   {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
24090   {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
24091   {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
24092   {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
24093   {"crypto-neon-fp-armv8",
24094 			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
24095   {NULL,		ARM_ARCH_NONE}
24096 };
24097 
24098 struct arm_option_value_table
24099 {
24100   char *name;
24101   long value;
24102 };
24103 
24104 static const struct arm_option_value_table arm_float_abis[] =
24105 {
24106   {"hard",	ARM_FLOAT_ABI_HARD},
24107   {"softfp",	ARM_FLOAT_ABI_SOFTFP},
24108   {"soft",	ARM_FLOAT_ABI_SOFT},
24109   {NULL,	0}
24110 };
24111 
24112 #ifdef OBJ_ELF
24113 /* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
24114 static const struct arm_option_value_table arm_eabis[] =
24115 {
24116   {"gnu",	EF_ARM_EABI_UNKNOWN},
24117   {"4",		EF_ARM_EABI_VER4},
24118   {"5",		EF_ARM_EABI_VER5},
24119   {NULL,	0}
24120 };
24121 #endif
24122 
24123 struct arm_long_option_table
24124 {
24125   char * option;		/* Substring to match.	*/
24126   char * help;			/* Help information.  */
24127   int (* func) (char * subopt);	/* Function to decode sub-option.  */
24128   char * deprecated;		/* If non-null, print this message.  */
24129 };
24130 
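/* Parse the "+ext" suffixes of an -mcpu= or -march= style argument, for
   example (hypothetically) "-march=armv7-a+mp+sec".  Extensions to add must
   come first, in alphabetical order, followed by any "no"-prefixed
   extensions to remove, also in alphabetical order; so "+sec+mp" or
   "+nomp+sec" would be rejected.  */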
24131 static bfd_boolean
24132 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24133 {
24134   arm_feature_set *ext_set = (arm_feature_set *)
24135       xmalloc (sizeof (arm_feature_set));
24136 
24137   /* We insist on extensions being specified in alphabetical order, and with
24138      extensions being added before being removed.  We achieve this by having
24139      the global ARM_EXTENSIONS table in alphabetical order, and using the
24140      ADDING_VALUE variable to indicate whether we are adding an extension (1)
24141      or removing it (0) and only allowing it to change in the order
24142      -1 -> 1 -> 0.  */
24143   const struct arm_option_extension_value_table * opt = NULL;
24144   int adding_value = -1;
24145 
24146   /* Copy the feature set, so that we can modify it.  */
24147   *ext_set = **opt_p;
24148   *opt_p = ext_set;
24149 
24150   while (str != NULL && *str != 0)
24151     {
24152       char *ext;
24153       size_t len;
24154 
24155       if (*str != '+')
24156 	{
24157 	  as_bad (_("invalid architectural extension"));
24158 	  return FALSE;
24159 	}
24160 
24161       str++;
24162       ext = strchr (str, '+');
24163 
24164       if (ext != NULL)
24165 	len = ext - str;
24166       else
24167 	len = strlen (str);
24168 
24169       if (len >= 2 && strncmp (str, "no", 2) == 0)
24170 	{
24171 	  if (adding_value != 0)
24172 	    {
24173 	      adding_value = 0;
24174 	      opt = arm_extensions;
24175 	    }
24176 
24177 	  len -= 2;
24178 	  str += 2;
24179 	}
24180       else if (len > 0)
24181 	{
24182 	  if (adding_value == -1)
24183 	    {
24184 	      adding_value = 1;
24185 	      opt = arm_extensions;
24186 	    }
24187 	  else if (adding_value != 1)
24188 	    {
24189 	      as_bad (_("must specify extensions to add before specifying "
24190 			"those to remove"));
24191 	      return FALSE;
24192 	    }
24193 	}
24194 
24195       if (len == 0)
24196 	{
24197 	  as_bad (_("missing architectural extension"));
24198 	  return FALSE;
24199 	}
24200 
24201       gas_assert (adding_value != -1);
24202       gas_assert (opt != NULL);
24203 
24204       /* Scan over the options table trying to find an exact match.  */
24205       for (; opt->name != NULL; opt++)
24206 	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24207 	  {
24208 	    /* Check we can apply the extension to this architecture.  */
24209 	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24210 	      {
24211 		as_bad (_("extension does not apply to the base architecture"));
24212 		return FALSE;
24213 	      }
24214 
24215 	    /* Add or remove the extension.  */
24216 	    if (adding_value)
24217 	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24218 	    else
24219 	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24220 
24221 	    break;
24222 	  }
24223 
24224       if (opt->name == NULL)
24225 	{
24226 	  /* Did we fail to find an extension because it wasn't specified in
24227 	     alphabetical order, or because it does not exist?  */
24228 
24229 	  for (opt = arm_extensions; opt->name != NULL; opt++)
24230 	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24231 	      break;
24232 
24233 	  if (opt->name == NULL)
24234 	    as_bad (_("unknown architectural extension `%s'"), str);
24235 	  else
24236 	    as_bad (_("architectural extensions must be specified in "
24237 		      "alphabetical order"));
24238 
24239 	  return FALSE;
24240 	}
24241       else
24242 	{
24243 	  /* We should skip the extension we've just matched the next time
24244 	     round.  */
24245 	  opt++;
24246 	}
24247 
24248       str = ext;
24249     }
24250 
24251   return TRUE;
24252 }
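/* A minimal usage sketch of arm_parse_extension (), illustrative only
   and not part of the original source.  It assumes "mp", "sec" and
   "simd" are entries in the arm_extensions table and that the base
   feature set permits them.  */
#if 0
  {
    /* BASE would typically be mcpu_cpu_opt after -mcpu processing.  */
    const arm_feature_set *base = mcpu_cpu_opt;
    char exts[] = "+mp+sec+nosimd";	/* Add mp and sec, remove simd.  */

    if (arm_parse_extension (exts, &base))
      {
	/* BASE now points at a freshly allocated copy of the original
	   set with the MP and Security bits added and the Advanced SIMD
	   bits cleared.  Additions had to precede removals, and each
	   group had to be in alphabetical order, as enforced above.  */
      }
  }
#endif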
24253 
24254 static bfd_boolean
24255 arm_parse_cpu (char *str)
24256 {
24257   const struct arm_cpu_option_table *opt;
24258   char *ext = strchr (str, '+');
24259   size_t len;
24260 
24261   if (ext != NULL)
24262     len = ext - str;
24263   else
24264     len = strlen (str);
24265 
24266   if (len == 0)
24267     {
24268       as_bad (_("missing cpu name `%s'"), str);
24269       return FALSE;
24270     }
24271 
24272   for (opt = arm_cpus; opt->name != NULL; opt++)
24273     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24274       {
24275 	mcpu_cpu_opt = &opt->value;
24276 	mcpu_fpu_opt = &opt->default_fpu;
24277 	if (opt->canonical_name)
24278 	  strcpy (selected_cpu_name, opt->canonical_name);
24279 	else
24280 	  {
24281 	    size_t i;
24282 
24283 	    for (i = 0; i < len; i++)
24284 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
24285 	    selected_cpu_name[i] = 0;
24286 	  }
24287 
24288 	if (ext != NULL)
24289 	  return arm_parse_extension (ext, &mcpu_cpu_opt);
24290 
24291 	return TRUE;
24292       }
24293 
24294   as_bad (_("unknown cpu `%s'"), str);
24295   return FALSE;
24296 }
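/* Worked example, illustrative and not from the original source: a
   command line "-mcpu=arm7tdmi" matches the "arm7tdmi" entry in
   arm_cpus, points mcpu_cpu_opt and mcpu_fpu_opt at that entry's
   values, and records the CPU name in selected_cpu_name (the entry's
   canonical spelling if it has one, otherwise the name upper-cased).
   Anything from the first '+' onwards, e.g. "+mp" in
   "-mcpu=cortex-a9+mp", is handed to arm_parse_extension () above.  */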
24297 
24298 static bfd_boolean
24299 arm_parse_arch (char *str)
24300 {
24301   const struct arm_arch_option_table *opt;
24302   char *ext = strchr (str, '+');
24303   size_t len;
24304 
24305   if (ext != NULL)
24306     len = ext - str;
24307   else
24308     len = strlen (str);
24309 
24310   if (len == 0)
24311     {
24312       as_bad (_("missing architecture name `%s'"), str);
24313       return FALSE;
24314     }
24315 
24316   for (opt = arm_archs; opt->name != NULL; opt++)
24317     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24318       {
24319 	march_cpu_opt = &opt->value;
24320 	march_fpu_opt = &opt->default_fpu;
24321 	strcpy (selected_cpu_name, opt->name);
24322 
24323 	if (ext != NULL)
24324 	  return arm_parse_extension (ext, &march_cpu_opt);
24325 
24326 	return TRUE;
24327       }
24328 
24329   as_bad (_("unknown architecture `%s'\n"), str);
24330   return FALSE;
24331 }
24332 
24333 static bfd_boolean
24334 arm_parse_fpu (char * str)
24335 {
24336   const struct arm_option_fpu_value_table * opt;
24337 
24338   for (opt = arm_fpus; opt->name != NULL; opt++)
24339     if (streq (opt->name, str))
24340       {
24341 	mfpu_opt = &opt->value;
24342 	return TRUE;
24343       }
24344 
24345   as_bad (_("unknown floating point format `%s'\n"), str);
24346   return FALSE;
24347 }
24348 
24349 static bfd_boolean
24350 arm_parse_float_abi (char * str)
24351 {
24352   const struct arm_option_value_table * opt;
24353 
24354   for (opt = arm_float_abis; opt->name != NULL; opt++)
24355     if (streq (opt->name, str))
24356       {
24357 	mfloat_abi_opt = opt->value;
24358 	return TRUE;
24359       }
24360 
24361   as_bad (_("unknown floating point abi `%s'\n"), str);
24362   return FALSE;
24363 }
24364 
24365 #ifdef OBJ_ELF
24366 static bfd_boolean
24367 arm_parse_eabi (char * str)
24368 {
24369   const struct arm_option_value_table *opt;
24370 
24371   for (opt = arm_eabis; opt->name != NULL; opt++)
24372     if (streq (opt->name, str))
24373       {
24374 	meabi_flags = opt->value;
24375 	return TRUE;
24376       }
24377   as_bad (_("unknown EABI `%s'\n"), str);
24378   return FALSE;
24379 }
24380 #endif
24381 
24382 static bfd_boolean
24383 arm_parse_it_mode (char * str)
24384 {
24385   bfd_boolean ret = TRUE;
24386 
24387   if (streq ("arm", str))
24388     implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24389   else if (streq ("thumb", str))
24390     implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24391   else if (streq ("always", str))
24392     implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24393   else if (streq ("never", str))
24394     implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24395   else
24396     {
24397       as_bad (_("unknown implicit IT mode `%s', should be "\
24398                 "arm, thumb, always, or never."), str);
24399       ret = FALSE;
24400     }
24401 
24402   return ret;
24403 }
24404 
24405 struct arm_long_option_table arm_long_opts[] =
24406 {
24407   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
24408    arm_parse_cpu, NULL},
24409   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
24410    arm_parse_arch, NULL},
24411   {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
24412    arm_parse_fpu, NULL},
24413   {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
24414    arm_parse_float_abi, NULL},
24415 #ifdef OBJ_ELF
24416   {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
24417    arm_parse_eabi, NULL},
24418 #endif
24419   {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
24420    arm_parse_it_mode, NULL},
24421   {NULL, NULL, 0, NULL}
24422 };
24423 
24424 int
24425 md_parse_option (int c, char * arg)
24426 {
24427   struct arm_option_table *opt;
24428   const struct arm_legacy_option_table *fopt;
24429   struct arm_long_option_table *lopt;
24430 
24431   switch (c)
24432     {
24433 #ifdef OPTION_EB
24434     case OPTION_EB:
24435       target_big_endian = 1;
24436       break;
24437 #endif
24438 
24439 #ifdef OPTION_EL
24440     case OPTION_EL:
24441       target_big_endian = 0;
24442       break;
24443 #endif
24444 
24445     case OPTION_FIX_V4BX:
24446       fix_v4bx = TRUE;
24447       break;
24448 
24449     case 'a':
24450       /* Listing option.  Just ignore these, we don't support additional
24451 	 ones.	*/
24452       return 0;
24453 
24454     default:
24455       for (opt = arm_opts; opt->option != NULL; opt++)
24456 	{
24457 	  if (c == opt->option[0]
24458 	      && ((arg == NULL && opt->option[1] == 0)
24459 		  || streq (arg, opt->option + 1)))
24460 	    {
24461 	      /* If the option is deprecated, tell the user.  */
24462 	      if (warn_on_deprecated && opt->deprecated != NULL)
24463 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24464 			   arg ? arg : "", _(opt->deprecated));
24465 
24466 	      if (opt->var != NULL)
24467 		*opt->var = opt->value;
24468 
24469 	      return 1;
24470 	    }
24471 	}
24472 
24473       for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
24474 	{
24475 	  if (c == fopt->option[0]
24476 	      && ((arg == NULL && fopt->option[1] == 0)
24477 		  || streq (arg, fopt->option + 1)))
24478 	    {
24479 	      /* If the option is deprecated, tell the user.  */
24480 	      if (warn_on_deprecated && fopt->deprecated != NULL)
24481 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24482 			   arg ? arg : "", _(fopt->deprecated));
24483 
24484 	      if (fopt->var != NULL)
24485 		*fopt->var = &fopt->value;
24486 
24487 	      return 1;
24488 	    }
24489 	}
24490 
24491       for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24492 	{
24493 	  /* These options are expected to have an argument.  */
24494 	  if (c == lopt->option[0]
24495 	      && arg != NULL
24496 	      && strncmp (arg, lopt->option + 1,
24497 			  strlen (lopt->option + 1)) == 0)
24498 	    {
24499 	      /* If the option is deprecated, tell the user.  */
24500 	      if (warn_on_deprecated && lopt->deprecated != NULL)
24501 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
24502 			   _(lopt->deprecated));
24503 
24504 	      /* Call the sub-option parser.  */
24505 	      return lopt->func (arg + strlen (lopt->option) - 1);
24506 	    }
24507 	}
24508 
24509       return 0;
24510     }
24511 
24512   return 1;
24513 }
24514 
24515 void
24516 md_show_usage (FILE * fp)
24517 {
24518   struct arm_option_table *opt;
24519   struct arm_long_option_table *lopt;
24520 
24521   fprintf (fp, _(" ARM-specific assembler options:\n"));
24522 
24523   for (opt = arm_opts; opt->option != NULL; opt++)
24524     if (opt->help != NULL)
24525       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
24526 
24527   for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24528     if (lopt->help != NULL)
24529       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
24530 
24531 #ifdef OPTION_EB
24532   fprintf (fp, _("\
24533   -EB                     assemble code for a big-endian cpu\n"));
24534 #endif
24535 
24536 #ifdef OPTION_EL
24537   fprintf (fp, _("\
24538   -EL                     assemble code for a little-endian cpu\n"));
24539 #endif
24540 
24541   fprintf (fp, _("\
24542   --fix-v4bx              Allow BX in ARMv4 code\n"));
24543 }
24544 
24545 
24546 #ifdef OBJ_ELF
24547 typedef struct
24548 {
24549   int val;
24550   arm_feature_set flags;
24551 } cpu_arch_ver_table;
24552 
24553 /* Mapping from CPU features to EABI CPU arch values.  The table must be sorted
24554    with the least-featured entries first; a worked example follows the table.  */
24555 static const cpu_arch_ver_table cpu_arch_ver[] =
24556 {
24557     {1, ARM_ARCH_V4},
24558     {2, ARM_ARCH_V4T},
24559     {3, ARM_ARCH_V5},
24560     {3, ARM_ARCH_V5T},
24561     {4, ARM_ARCH_V5TE},
24562     {5, ARM_ARCH_V5TEJ},
24563     {6, ARM_ARCH_V6},
24564     {9, ARM_ARCH_V6K},
24565     {7, ARM_ARCH_V6Z},
24566     {11, ARM_ARCH_V6M},
24567     {12, ARM_ARCH_V6SM},
24568     {8, ARM_ARCH_V6T2},
24569     {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
24570     {10, ARM_ARCH_V7R},
24571     {10, ARM_ARCH_V7M},
24572     {14, ARM_ARCH_V8A},
24573     {0, ARM_ARCH_NONE}
24574 };
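/* Worked example, illustrative and not from the original source: the
   search loop in aeabi_set_public_attributes () below walks this table
   in order; whenever an entry still shares a feature with the remaining
   flags it records that entry's value and clears the shared features.
   Starting from a v5TE feature set, only the v4, v4T, v5 and v5TE rows
   contribute something new, so the loop ends with arch == 4, which is
   emitted as Tag_CPU_arch = 4 (v5TE).  */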
24575 
24576 /* Set an attribute if it has not already been set by the user.  */
24577 static void
24578 aeabi_set_attribute_int (int tag, int value)
24579 {
24580   if (tag < 1
24581       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24582       || !attributes_set_explicitly[tag])
24583     bfd_elf_add_proc_attr_int (stdoutput, tag, value);
24584 }
24585 
24586 static void
24587 aeabi_set_attribute_string (int tag, const char *value)
24588 {
24589   if (tag < 1
24590       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24591       || !attributes_set_explicitly[tag])
24592     bfd_elf_add_proc_attr_string (stdoutput, tag, value);
24593 }
24594 
24595 /* Set the public EABI object attributes.  */
24596 static void
24597 aeabi_set_public_attributes (void)
24598 {
24599   int arch;
24600   char profile;
24601   int virt_sec = 0;
24602   int fp16_optional = 0;
24603   arm_feature_set flags;
24604   arm_feature_set tmp;
24605   const cpu_arch_ver_table *p;
24606 
24607   /* Choose the architecture based on the capabilities of the requested cpu
24608      (if any) and/or the instructions actually used.  */
24609   ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
24610   ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
24611   ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
24612 
24613   if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
24614     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
24615 
24616   if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
24617     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
24618 
24619   /* Allow the user to override the reported architecture.  */
24620   if (object_arch)
24621     {
24622       ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
24623       ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
24624     }
24625 
24626   /* We need to make sure that the attributes do not identify us as v6S-M
24627      when the only v6S-M feature in use is the Operating System Extensions.  */
24628   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os)
24629       && !ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
24630     ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
24631 
24632   tmp = flags;
24633   arch = 0;
24634   for (p = cpu_arch_ver; p->val; p++)
24635     {
24636       if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
24637 	{
24638 	  arch = p->val;
24639 	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
24640 	}
24641     }
24642 
24643   /* The table lookup above finds the last architecture to contribute
24644      a new feature.  Unfortunately, Tag13 is a subset of the union of
24645      v6T2 and v7-M, so it is never seen as contributing a new feature.
24646      We cannot search for the last entry which is entirely used,
24647      because if no CPU is specified we build up only those flags
24648      actually used.  Perhaps we should separate out the specified
24649      and implicit cases.  Avoid taking this path for -march=all by
24650      checking for contradictory v7-A / v7-M features.  */
24651   if (arch == 10
24652       && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
24653       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
24654       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
24655     arch = 13;
24656 
24657   /* Tag_CPU_name.  */
24658   if (selected_cpu_name[0])
24659     {
24660       char *q;
24661 
24662       q = selected_cpu_name;
24663       if (strncmp (q, "armv", 4) == 0)
24664 	{
24665 	  int i;
24666 
24667 	  q += 4;
24668 	  for (i = 0; q[i]; i++)
24669 	    q[i] = TOUPPER (q[i]);
24670 	}
24671       aeabi_set_attribute_string (Tag_CPU_name, q);
24672     }
24673 
24674   /* Tag_CPU_arch.  */
24675   aeabi_set_attribute_int (Tag_CPU_arch, arch);
24676 
24677   /* Tag_CPU_arch_profile.  */
24678   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
24679     profile = 'A';
24680   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
24681     profile = 'R';
24682   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
24683     profile = 'M';
24684   else
24685     profile = '\0';
24686 
24687   if (profile != '\0')
24688     aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
24689 
24690   /* Tag_ARM_ISA_use.  */
24691   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
24692       || arch == 0)
24693     aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
24694 
24695   /* Tag_THUMB_ISA_use.  */
24696   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
24697       || arch == 0)
24698     aeabi_set_attribute_int (Tag_THUMB_ISA_use,
24699 	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
24700 
24701   /* Tag_VFP_arch.  */
24702   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
24703     aeabi_set_attribute_int (Tag_VFP_arch, 7);
24704   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
24705     aeabi_set_attribute_int (Tag_VFP_arch,
24706 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
24707 			     ? 5 : 6);
24708   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
24709     {
24710       fp16_optional = 1;
24711       aeabi_set_attribute_int (Tag_VFP_arch, 3);
24712     }
24713   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
24714     {
24715       aeabi_set_attribute_int (Tag_VFP_arch, 4);
24716       fp16_optional = 1;
24717     }
24718   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
24719     aeabi_set_attribute_int (Tag_VFP_arch, 2);
24720   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
24721            || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
24722     aeabi_set_attribute_int (Tag_VFP_arch, 1);
24723 
24724   /* Tag_ABI_HardFP_use.  */
24725   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
24726       && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
24727     aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
24728 
24729   /* Tag_WMMX_arch.  */
24730   if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
24731     aeabi_set_attribute_int (Tag_WMMX_arch, 2);
24732   else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
24733     aeabi_set_attribute_int (Tag_WMMX_arch, 1);
24734 
24735   /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
24736   if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
24737     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
24738   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
24739     {
24740       if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
24741 	{
24742 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
24743 	}
24744       else
24745 	{
24746 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
24747 	  fp16_optional = 1;
24748 	}
24749     }
24750 
24751   /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
24752   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
24753     aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
24754 
24755   /* Tag_DIV_use.
24756 
24757      We set Tag_DIV_use to two when integer divide instructions have been used
24758      in ARM state, or when Thumb integer divide instructions have been used,
24759      but we have no architecture profile set, nor have we any ARM instructions.
24760 
24761      For ARMv8 we set the tag to 0 as integer divide is implied by the base
24762      architecture.
24763 
24764      For new architectures we will have to review these tests.  (Illustrative examples follow this function.)  */
24765   gas_assert (arch <= TAG_CPU_ARCH_V8);
24766   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
24767     aeabi_set_attribute_int (Tag_DIV_use, 0);
24768   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
24769 	   || (profile == '\0'
24770 	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
24771 	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
24772     aeabi_set_attribute_int (Tag_DIV_use, 2);
24773 
24774   /* Tag_MPextension_use.  */
24775   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
24776     aeabi_set_attribute_int (Tag_MPextension_use, 1);
24777 
24778   /* Tag_Virtualization_use.  */
24779   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
24780     virt_sec |= 1;
24781   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
24782     virt_sec |= 2;
24783   if (virt_sec != 0)
24784     aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
24785 }
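/* Illustrative examples of the function above, not part of the original
   source: for a v7-A feature set the profile byte is 'A', so
   Tag_CPU_arch_profile is recorded as 0x41; a target with the ARM-state
   integer divide extension (arm_ext_adiv) gets Tag_DIV_use = 2, while a
   v8-A target gets Tag_DIV_use = 0 because the base architecture
   already implies the divide instructions.  */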
24786 
24787 /* Add the default contents for the .ARM.attributes section.  */
24788 void
24789 arm_md_end (void)
24790 {
24791   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24792     return;
24793 
24794   aeabi_set_public_attributes ();
24795 }
24796 #endif /* OBJ_ELF */
24797 
24798 
24799 /* Parse a .cpu directive.  */
24800 
24801 static void
24802 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
24803 {
24804   const struct arm_cpu_option_table *opt;
24805   char *name;
24806   char saved_char;
24807 
24808   name = input_line_pointer;
24809   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24810     input_line_pointer++;
24811   saved_char = *input_line_pointer;
24812   *input_line_pointer = 0;
24813 
24814   /* Skip the first "all" entry.  */
24815   for (opt = arm_cpus + 1; opt->name != NULL; opt++)
24816     if (streq (opt->name, name))
24817       {
24818 	mcpu_cpu_opt = &opt->value;
24819 	selected_cpu = opt->value;
24820 	if (opt->canonical_name)
24821 	  strcpy (selected_cpu_name, opt->canonical_name);
24822 	else
24823 	  {
24824 	    int i;
24825 	    for (i = 0; opt->name[i]; i++)
24826 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
24827 
24828 	    selected_cpu_name[i] = 0;
24829 	  }
24830 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24831 	*input_line_pointer = saved_char;
24832 	demand_empty_rest_of_line ();
24833 	return;
24834       }
24835   as_bad (_("unknown cpu `%s'"), name);
24836   *input_line_pointer = saved_char;
24837   ignore_rest_of_line ();
24838 }
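/* Illustrative directive usage, not part of the original source; the
   names assume "cortex-a9", "mp" and "neon" appear in the arm_cpus,
   arm_extensions and arm_fpus tables respectively:

	.cpu	cortex-a9		@ select the Cortex-A9 feature set
	.arch_extension mp		@ allow the MP extension instructions
	.fpu	neon			@ and the NEON/VFPv3 instructions

   Each of these directives re-derives cpu_variant from the currently
   selected CPU and FPU, so it takes effect for the code that follows.  */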
24839 
24840 
24841 /* Parse a .arch directive.  */
24842 
24843 static void
24844 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
24845 {
24846   const struct arm_arch_option_table *opt;
24847   char saved_char;
24848   char *name;
24849 
24850   name = input_line_pointer;
24851   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24852     input_line_pointer++;
24853   saved_char = *input_line_pointer;
24854   *input_line_pointer = 0;
24855 
24856   /* Skip the first "all" entry.  */
24857   for (opt = arm_archs + 1; opt->name != NULL; opt++)
24858     if (streq (opt->name, name))
24859       {
24860 	mcpu_cpu_opt = &opt->value;
24861 	selected_cpu = opt->value;
24862 	strcpy (selected_cpu_name, opt->name);
24863 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24864 	*input_line_pointer = saved_char;
24865 	demand_empty_rest_of_line ();
24866 	return;
24867       }
24868 
24869   as_bad (_("unknown architecture `%s'\n"), name);
24870   *input_line_pointer = saved_char;
24871   ignore_rest_of_line ();
24872 }
24873 
24874 
24875 /* Parse a .object_arch directive.  */
24876 
24877 static void
24878 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
24879 {
24880   const struct arm_arch_option_table *opt;
24881   char saved_char;
24882   char *name;
24883 
24884   name = input_line_pointer;
24885   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24886     input_line_pointer++;
24887   saved_char = *input_line_pointer;
24888   *input_line_pointer = 0;
24889 
24890   /* Skip the first "all" entry.  */
24891   for (opt = arm_archs + 1; opt->name != NULL; opt++)
24892     if (streq (opt->name, name))
24893       {
24894 	object_arch = &opt->value;
24895 	*input_line_pointer = saved_char;
24896 	demand_empty_rest_of_line ();
24897 	return;
24898       }
24899 
24900   as_bad (_("unknown architecture `%s'\n"), name);
24901   *input_line_pointer = saved_char;
24902   ignore_rest_of_line ();
24903 }
24904 
24905 /* Parse a .arch_extension directive.  */
24906 
24907 static void
24908 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
24909 {
24910   const struct arm_option_extension_value_table *opt;
24911   char saved_char;
24912   char *name;
24913   int adding_value = 1;
24914 
24915   name = input_line_pointer;
24916   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24917     input_line_pointer++;
24918   saved_char = *input_line_pointer;
24919   *input_line_pointer = 0;
24920 
24921   if (strlen (name) >= 2
24922       && strncmp (name, "no", 2) == 0)
24923     {
24924       adding_value = 0;
24925       name += 2;
24926     }
24927 
24928   for (opt = arm_extensions; opt->name != NULL; opt++)
24929     if (streq (opt->name, name))
24930       {
24931 	if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
24932 	  {
24933 	    as_bad (_("architectural extension `%s' is not allowed for the "
24934 		      "current base architecture"), name);
24935 	    break;
24936 	  }
24937 
24938 	if (adding_value)
24939 	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
24940 	else
24941 	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
24942 
24943 	mcpu_cpu_opt = &selected_cpu;
24944 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24945 	*input_line_pointer = saved_char;
24946 	demand_empty_rest_of_line ();
24947 	return;
24948       }
24949 
24950   if (opt->name == NULL)
24951     as_bad (_("unknown architectural extension `%s'\n"), name);
24952 
24953   *input_line_pointer = saved_char;
24954   ignore_rest_of_line ();
24955 }
24956 
24957 /* Parse a .fpu directive.  */
24958 
24959 static void
24960 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
24961 {
24962   const struct arm_option_fpu_value_table *opt;
24963   char saved_char;
24964   char *name;
24965 
24966   name = input_line_pointer;
24967   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24968     input_line_pointer++;
24969   saved_char = *input_line_pointer;
24970   *input_line_pointer = 0;
24971 
24972   for (opt = arm_fpus; opt->name != NULL; opt++)
24973     if (streq (opt->name, name))
24974       {
24975 	mfpu_opt = &opt->value;
24976 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24977 	*input_line_pointer = saved_char;
24978 	demand_empty_rest_of_line ();
24979 	return;
24980       }
24981 
24982   as_bad (_("unknown floating point format `%s'\n"), name);
24983   *input_line_pointer = saved_char;
24984   ignore_rest_of_line ();
24985 }
24986 
24987 /* Copy symbol information.  */
24988 
24989 void
24990 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
24991 {
24992   ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
24993 }
24994 
24995 #ifdef OBJ_ELF
24996 /* Given a symbolic attribute NAME, return the proper integer value.
24997    Returns -1 if the attribute is not known.  */
24998 
24999 int
25000 arm_convert_symbolic_attribute (const char *name)
25001 {
25002   static const struct
25003   {
25004     const char * name;
25005     const int    tag;
25006   }
25007   attribute_table[] =
25008     {
25009       /* When you modify this table you should
25010 	 also modify the list in doc/c-arm.texi.  */
25011 #define T(tag) {#tag, tag}
25012       T (Tag_CPU_raw_name),
25013       T (Tag_CPU_name),
25014       T (Tag_CPU_arch),
25015       T (Tag_CPU_arch_profile),
25016       T (Tag_ARM_ISA_use),
25017       T (Tag_THUMB_ISA_use),
25018       T (Tag_FP_arch),
25019       T (Tag_VFP_arch),
25020       T (Tag_WMMX_arch),
25021       T (Tag_Advanced_SIMD_arch),
25022       T (Tag_PCS_config),
25023       T (Tag_ABI_PCS_R9_use),
25024       T (Tag_ABI_PCS_RW_data),
25025       T (Tag_ABI_PCS_RO_data),
25026       T (Tag_ABI_PCS_GOT_use),
25027       T (Tag_ABI_PCS_wchar_t),
25028       T (Tag_ABI_FP_rounding),
25029       T (Tag_ABI_FP_denormal),
25030       T (Tag_ABI_FP_exceptions),
25031       T (Tag_ABI_FP_user_exceptions),
25032       T (Tag_ABI_FP_number_model),
25033       T (Tag_ABI_align_needed),
25034       T (Tag_ABI_align8_needed),
25035       T (Tag_ABI_align_preserved),
25036       T (Tag_ABI_align8_preserved),
25037       T (Tag_ABI_enum_size),
25038       T (Tag_ABI_HardFP_use),
25039       T (Tag_ABI_VFP_args),
25040       T (Tag_ABI_WMMX_args),
25041       T (Tag_ABI_optimization_goals),
25042       T (Tag_ABI_FP_optimization_goals),
25043       T (Tag_compatibility),
25044       T (Tag_CPU_unaligned_access),
25045       T (Tag_FP_HP_extension),
25046       T (Tag_VFP_HP_extension),
25047       T (Tag_ABI_FP_16bit_format),
25048       T (Tag_MPextension_use),
25049       T (Tag_DIV_use),
25050       T (Tag_nodefaults),
25051       T (Tag_also_compatible_with),
25052       T (Tag_conformance),
25053       T (Tag_T2EE_use),
25054       T (Tag_Virtualization_use),
25055       /* We deliberately do not include Tag_MPextension_use_legacy.  */
25056 #undef T
25057     };
25058   unsigned int i;
25059 
25060   if (name == NULL)
25061     return -1;
25062 
25063   for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25064     if (streq (name, attribute_table[i].name))
25065       return attribute_table[i].tag;
25066 
25067   return -1;
25068 }
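/* Illustrative note, not part of the original source: this conversion
   lets an assembly file spell attribute tags symbolically, e.g.

	.eabi_attribute Tag_DIV_use, 2

   The name is translated here to its numeric tag before the attribute
   is recorded; an unrecognised name yields -1, signalling the error to
   the caller.  */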
25069 
25070 
25071 /* Apply sym value for relocations only in the case that
25072    they are for local symbols and you have the respective
25073    architectural feature for blx and simple switches.  */
25074 int
25075 arm_apply_sym_value (struct fix * fixP)
25076 {
25077   if (fixP->fx_addsy
25078       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25079       && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25080     {
25081       switch (fixP->fx_r_type)
25082 	{
25083 	case BFD_RELOC_ARM_PCREL_BLX:
25084 	case BFD_RELOC_THUMB_PCREL_BRANCH23:
25085 	  if (ARM_IS_FUNC (fixP->fx_addsy))
25086 	    return 1;
25087 	  break;
25088 
25089 	case BFD_RELOC_ARM_PCREL_CALL:
25090 	case BFD_RELOC_THUMB_PCREL_BLX:
25091 	  if (THUMB_IS_FUNC (fixP->fx_addsy))
25092 	      return 1;
25093 	  break;
25094 
25095 	default:
25096 	  break;
25097 	}
25098 
25099     }
25100   return 0;
25101 }
25102 #endif /* OBJ_ELF */
25103