1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2024 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 #define streq(a, b) (strcmp (a, b) == 0)
42
43 #define END_OF_INSN '\0'
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
55
56 /* Currently active instruction sequence. */
57 static aarch64_instr_sequence *insn_sequence = NULL;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
64 /* Which ABI to use. */
65 enum aarch64_abi_type
66 {
67 AARCH64_ABI_NONE = 0,
68 AARCH64_ABI_LP64 = 1,
69 AARCH64_ABI_ILP32 = 2,
70 AARCH64_ABI_LLP64 = 3
71 };
72
73 unsigned int aarch64_sframe_cfa_sp_reg;
74 /* The other CFA base register for SFrame stack trace info. */
75 unsigned int aarch64_sframe_cfa_fp_reg;
76 unsigned int aarch64_sframe_cfa_ra_reg;
77
78 #ifndef DEFAULT_ARCH
79 #define DEFAULT_ARCH "aarch64"
80 #endif
81
82 #ifdef OBJ_ELF
83 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
84 static const char *default_arch = DEFAULT_ARCH;
85 #endif
86
87 /* AArch64 ABI for the output file. */
88 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
89
90 /* When non-zero, program to a 32-bit model, in which the C data types
91 int, long and all pointer types are 32-bit objects (ILP32); or to a
92 64-bit model, in which the C int type is 32-bits but the C long type
93 and all pointer types are 64-bit objects (LP64). */
94 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
95
96 /* When non zero, C types int and long are 32 bit,
97 pointers, however are 64 bit */
98 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
99
100 enum vector_el_type
101 {
102 NT_invtype = -1,
103 NT_b,
104 NT_h,
105 NT_s,
106 NT_d,
107 NT_q,
108 NT_zero,
109 NT_merge
110 };
111
112 /* Bits for DEFINED field in vector_type_el. */
113 #define NTA_HASTYPE 1
114 #define NTA_HASINDEX 2
115 #define NTA_HASVARWIDTH 4
116
/* Description of a vector arrangement or element type parsed from an
   operand, e.g. the ".4s" of "v0.4s".  Which fields are valid is
   recorded in DEFINED via the NTA_* bits above.  */
struct vector_type_el
{
  /* Element type (NT_*), or NT_invtype if none was recognized.  */
  enum vector_el_type type;
  /* Mask of NTA_HASTYPE / NTA_HASINDEX / NTA_HASVARWIDTH bits.  */
  unsigned char defined;
  /* Element size; only meaningful with NTA_HASVARWIDTH — units are not
     established here, TODO confirm against the parsers that set it.  */
  unsigned element_size;
  /* Number of elements in the arrangement.  */
  unsigned width;
  /* Element index when NTA_HASINDEX is set.  */
  int64_t index;
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* Relocation/fixup information gathered while parsing one instruction.  */
struct reloc
{
  /* BFD relocation code to emit for this operand.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Nonzero if the relocation is PC-relative.  */
  int pc_rel;
  /* The operand that the relocation is attached to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* Presumably set when libopcodes is needed to finish the fixup —
     TODO(review): confirm against the fixup-application code.  */
  unsigned need_libopcodes_p : 1;
};
137
/* The assembler's view of one AArch64 instruction while it is being
   parsed and encoded.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
151
152 typedef struct aarch64_instruction aarch64_instruction;
153
154 static aarch64_instruction inst;
155
156 static bool parse_operands (char *, const aarch64_opcode *);
157 static bool programmer_friendly_fixup (aarch64_instruction *);
158
159 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
160 data fields contain the following information:
161
162 data[0].i:
163 A mask of register types that would have been acceptable as bare
164 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
   is set if a general parsing error occurred for an operand (that is,
166 an error not related to registers, and having no error string).
167
168 data[1].i:
169 A mask of register types that would have been acceptable inside
170 a register list. In addition, SEF_IN_REGLIST is set if the
171 operand contained a '{' and if we got to the point of trying
172 to parse a register inside a list.
173
174 data[2].i:
175 The mask associated with the register that was actually seen, or 0
176 if none. A nonzero value describes a register inside a register
177 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
178 register.
179
180 The idea is that stringless errors from multiple opcode templates can
181 be ORed together to give a summary of the available alternatives. */
182 #define SEF_DEFAULT_ERROR (1U << 31)
183 #define SEF_IN_REGLIST (1U << 31)
184
185 /* Diagnostics inline function utilities.
186
187 These are lightweight utilities which should only be called by parse_operands
188 and other parsers. GAS processes each assembly line by parsing it against
189 instruction template(s), in the case of multiple templates (for the same
190 mnemonic name), those templates are tried one by one until one succeeds or
191 all fail. An assembly line may fail a few templates before being
192 successfully parsed; an error saved here in most cases is not a user error
193 but an error indicating the current template is not the right template.
194 Therefore it is very important that errors can be saved at a low cost during
195 the parsing; we don't want to slow down the whole parsing by recording
196 non-user errors in detail.
197
198 Remember that the objective is to help GAS pick up the most appropriate
199 error message in the case of multiple templates, e.g. FMOV which has 8
200 templates. */
201
202 static inline void
clear_error(void)203 clear_error (void)
204 {
205 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
206 inst.parsing_error.kind = AARCH64_OPDE_NIL;
207 }
208
209 static inline bool
error_p(void)210 error_p (void)
211 {
212 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
213 }
214
215 static inline void
set_error(enum aarch64_operand_error_kind kind,const char * error)216 set_error (enum aarch64_operand_error_kind kind, const char *error)
217 {
218 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
219 inst.parsing_error.index = -1;
220 inst.parsing_error.kind = kind;
221 inst.parsing_error.error = error;
222 }
223
/* Record a recoverable error with message ERROR; parsing of the current
   template may continue after such an error.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
229
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  Records a stringless syntax error; SEF_DEFAULT_ERROR
   in data[0] marks it as a generic (non-register) parse failure.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Must be set after set_error, which clears the data fields.  */
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
238
/* Record a syntax error with message ERROR, overwriting any earlier one.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
244
245 static inline void
set_first_syntax_error(const char * error)246 set_first_syntax_error (const char *error)
247 {
248 if (! error_p ())
249 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
250 }
251
/* Record a fatal syntax error with message ERROR; no further templates
   should be tried after such an error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
257
258 /* Return value for certain parsers when the parsing fails; those parsers
259 return the information of the parsed result, e.g. register number, on
260 success. */
261 #define PARSE_FAIL -1
262
263 /* This is an invalid condition code that means no conditional field is
264 present. */
265 #define COND_ALWAYS 0x10
266
267 typedef struct
268 {
269 const char *template;
270 uint32_t value;
271 } asm_nzcv;
272
273 struct reloc_entry
274 {
275 char *name;
276 bfd_reloc_code_real_type reloc;
277 };
278
279 /* Macros to define the register types and masks for the purpose
280 of parsing. */
281
282 #undef AARCH64_REG_TYPES
283 #define AARCH64_REG_TYPES \
284 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
285 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
286 BASIC_REG_TYPE(SP_32) /* wsp */ \
287 BASIC_REG_TYPE(SP_64) /* sp */ \
288 BASIC_REG_TYPE(ZR_32) /* wzr */ \
289 BASIC_REG_TYPE(ZR_64) /* xzr */ \
290 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
291 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
292 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
293 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
294 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
295 BASIC_REG_TYPE(V) /* v[0-31] */ \
296 BASIC_REG_TYPE(Z) /* z[0-31] */ \
297 BASIC_REG_TYPE(P) /* p[0-15] */ \
298 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
299 BASIC_REG_TYPE(ZA) /* za */ \
300 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
301 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
302 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
303 BASIC_REG_TYPE(ZT0) /* zt0 */ \
304 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
305 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
306 /* Typecheck: same, plus SVE registers. */ \
307 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z)) \
309 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
310 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
312 /* Typecheck: same, plus SVE registers. */ \
313 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
315 | REG_TYPE(Z)) \
316 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
317 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
319 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
320 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: any [BHSDQ]P FP. */ \
324 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
325 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
326 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
327 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
331 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
332 be used for SVE instructions, since Zn and Pn are valid symbols \
333 in other contexts. */ \
334 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
335 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
336 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
337 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
338 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
339 | REG_TYPE(Z) | REG_TYPE(P)) \
340 /* Likewise, but with predicate-as-counter registers added. */ \
341 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
342 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
343 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
344 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
345 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
346 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
347 /* Any integer register; used for error messages only. */ \
348 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
349 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
350 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
351 /* Any vector register. */ \
352 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
353 /* An SVE vector or predicate register. */ \
354 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
355 /* Any vector or predicate register. */ \
356 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
357 /* The whole of ZA or a single tile. */ \
358 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
359 /* A horizontal or vertical slice of a ZA tile. */ \
360 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
361 /* Pseudo type to mark the end of the enumerator sequence. */ \
362 END_REG_TYPE(MAX)
363
364 #undef BASIC_REG_TYPE
365 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
366 #undef MULTI_REG_TYPE
367 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
368 #undef END_REG_TYPE
369 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
370
371 /* Register type enumerators. */
372 typedef enum aarch64_reg_type_
373 {
374 /* A list of REG_TYPE_*. */
375 AARCH64_REG_TYPES
376 } aarch64_reg_type;
377
378 #undef BASIC_REG_TYPE
379 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
380 #undef REG_TYPE
381 #define REG_TYPE(T) (1 << REG_TYPE_##T)
382 #undef MULTI_REG_TYPE
383 #define MULTI_REG_TYPE(T,V) V,
384 #undef END_REG_TYPE
385 #define END_REG_TYPE(T) 0
386
387 /* Structure for a hash table entry for a register. */
388 typedef struct
389 {
390 const char *name;
391 unsigned char number;
392 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
393 unsigned char builtin;
394 } reg_entry;
395
396 /* Values indexed by aarch64_reg_type to assist the type checking. */
397 static const unsigned reg_type_masks[] =
398 {
399 AARCH64_REG_TYPES
400 };
401
402 #undef BASIC_REG_TYPE
403 #undef REG_TYPE
404 #undef MULTI_REG_TYPE
405 #undef END_REG_TYPE
406 #undef AARCH64_REG_TYPES
407
408 /* We expected one of the registers in MASK to be specified. If a register
409 of some kind was specified, SEEN is a mask that contains that register,
410 otherwise it is zero.
411
412 If it is possible to provide a relatively pithy message that describes
413 the error exactly, return a string that does so, reporting the error
414 against "operand %d". Return null otherwise.
415
416 From a QoI perspective, any REG_TYPE_* that is passed as the first
417 argument to set_expected_reg_error should generally have its own message.
418 Providing messages for combinations of such REG_TYPE_*s can be useful if
419 it is possible to summarize the combination in a relatively natural way.
420 On the other hand, it seems better to avoid long lists of unrelated
421 things. */
422
/* Return an untranslated (N_-wrapped) message for the register-type
   expectation described by MASK and SEEN, or NULL if no specific
   message applies; see the comment above for the full contract.  */
static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN, i.e. cases where the register
     the user wrote tells us more than the expected set alone.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy message available; caller falls back to a generic one.  */
  return NULL;
}
514
515 /* Record that we expected a register of type TYPE but didn't see one.
516 REG is the register that we actually saw, or null if we didn't see a
517 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
518 contents of a register list, otherwise it is zero. */
519
520 static inline void
set_expected_reg_error(aarch64_reg_type type,const reg_entry * reg,unsigned int flags)521 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
522 unsigned int flags)
523 {
524 assert (flags == 0 || flags == SEF_IN_REGLIST);
525 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
526 if (flags & SEF_IN_REGLIST)
527 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
528 else
529 inst.parsing_error.data[0].i = reg_type_masks[type];
530 if (reg)
531 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
532 }
533
534 /* Record that we expected a register list containing registers of type TYPE,
535 but didn't see the opening '{'. If we saw a register instead, REG is the
536 register that we saw, otherwise it is null. */
537
538 static inline void
set_expected_reglist_error(aarch64_reg_type type,const reg_entry * reg)539 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
540 {
541 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
542 inst.parsing_error.data[1].i = reg_type_masks[type];
543 if (reg)
544 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
545 }
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 31
549 #define REG_ZR 31
550
551 /* Instructions take 4 bytes in the object file. */
552 #define INSN_SIZE 4
553
554 static htab_t aarch64_ops_hsh;
555 static htab_t aarch64_cond_hsh;
556 static htab_t aarch64_shift_hsh;
557 static htab_t aarch64_sys_regs_hsh;
558 static htab_t aarch64_pstatefield_hsh;
559 static htab_t aarch64_sys_regs_ic_hsh;
560 static htab_t aarch64_sys_regs_dc_hsh;
561 static htab_t aarch64_sys_regs_at_hsh;
562 static htab_t aarch64_sys_regs_tlbi_hsh;
563 static htab_t aarch64_sys_regs_sr_hsh;
564 static htab_t aarch64_reg_hsh;
565 static htab_t aarch64_barrier_opt_hsh;
566 static htab_t aarch64_nzcv_hsh;
567 static htab_t aarch64_pldop_hsh;
568 static htab_t aarch64_hint_opt_hsh;
569
570 /* Stuff needed to resolve the label ambiguity
571 As:
572 ...
573 label: <insn>
574 may differ from:
575 ...
576 label:
577 <insn> */
578
579 static symbolS *last_label_seen;
580
581 /* Literal pool structure. Held on a per-section
582 and per-sub-section basis. */
583
584 #define MAX_LITERAL_POOL_SIZE 1024
585 typedef struct literal_expression
586 {
587 expressionS exp;
588 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
589 LITTLENUM_TYPE * bignum;
590 } literal_expression;
591
592 typedef struct literal_pool
593 {
594 literal_expression literals[MAX_LITERAL_POOL_SIZE];
595 unsigned int next_free_entry;
596 unsigned int id;
597 symbolS *symbol;
598 segT section;
599 subsegT sub_section;
600 int size;
601 struct literal_pool *next;
602 } literal_pool;
603
604 /* Pointer to a linked list of literal pools. */
605 static literal_pool *list_of_pools = NULL;
606
607 /* Pure syntax. */
608
609 /* This array holds the chars that always start a comment. If the
610 pre-processor is disabled, these aren't very useful. */
611 const char comment_chars[] = "";
612
613 /* This array holds the chars that only start a comment at the beginning of
614 a line. If the line seems to have the form '# 123 filename'
615 .line and .file directives will appear in the pre-processed output. */
616 /* Note that input_file.c hand checks for '#' at the beginning of the
617 first line of the input file. This is because the compiler outputs
618 #NO_APP at the beginning of its output. */
619 /* Also note that comments like this one will always work. */
620 const char line_comment_chars[] = "#";
621
622 const char line_separator_chars[] = ";";
623
624 /* Chars that can be used to separate mant
625 from exp in floating point numbers. */
626 const char EXP_CHARS[] = "eE";
627
628 /* Chars that mean this number is a floating point constant. */
629 /* As in 0f12.456 */
630 /* or 0d1.2345e12 */
631
632 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
633
634 /* Prefix character that indicates the start of an immediate value. */
635 #define is_immediate_prefix(C) ((C) == '#')
636
637 /* Separator character handling. */
638
639 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
640
/* If **STR is C, advance *STR past it and return TRUE; otherwise leave
   *STR unchanged and return FALSE.  */

static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
652
653 #define skip_past_comma(str) skip_past_char (str, ',')
654
655 /* Arithmetic expressions (possibly involving symbols). */
656
657 static bool in_aarch64_get_expression = false;
658
659 /* Third argument to aarch64_get_expression. */
660 #define GE_NO_PREFIX false
661 #define GE_OPT_PREFIX true
662
663 /* Fourth argument to aarch64_get_expression. */
664 #define ALLOW_ABSENT false
665 #define REJECT_ABSENT true
666
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
673
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate prefix.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* GAS's expression () reads from input_line_pointer, so point it at
     *STR for the duration of the call and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand to flag unparsable operands as O_illegal.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an immediate, so failure there is
	 fatal; otherwise keep any earlier, more specific error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore GAS state.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
733
734 /* Turn a string in input_line_pointer into a floating point constant
735 of type TYPE, and store the appropriate bytes in *LITP. The number
736 of LITTLENUMS emitted is stored in *SIZEP. An error message is
737 returned, or NULL on OK. */
738
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
744
745 /* We handle all bad expressions here, so that we can report the faulty
746 instruction in the error message. */
747 void
md_operand(expressionS * exp)748 md_operand (expressionS * exp)
749 {
750 if (in_aarch64_get_expression)
751 exp->X_op = O_illegal;
752 }
753
754 /* Immediate values. */
755
756 /* Errors may be set multiple times during parsing or bit encoding
757 (particularly in the Neon bits), but usually the earliest error which is set
758 will be the most meaningful. Avoid overwriting it with later (cascading)
759 errors by calling this function. */
760
/* Record ERROR as a syntax error unless an (earlier, more meaningful)
   error has already been recorded.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
767
768 /* Similar to first_error, but this function accepts formatted error
769 message. */
770 static void
first_error_fmt(const char * format,...)771 first_error_fmt (const char *format, ...)
772 {
773 va_list args;
774 enum
775 { size = 100 };
776 /* N.B. this single buffer will not cause error messages for different
777 instructions to pollute each other; this is because at the end of
778 processing of each assembly line, error message if any will be
779 collected by as_bad. */
780 static char buffer[size];
781
782 if (! error_p ())
783 {
784 int ret ATTRIBUTE_UNUSED;
785 va_start (args, format);
786 ret = vsnprintf (buffer, size, format, args);
787 know (ret <= size - 1 && ret >= 0);
788 va_end (args);
789 set_syntax_error (buffer);
790 }
791 }
792
793 /* Internal helper routine converting a vector_type_el structure *VECTYPE
794 to a corresponding operand qualifier. */
795
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b .. NT_q).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector qualifier for each element type; the final
     qualifier is computed as an offset from these.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map straight to qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register; S_B..S_Q parallel NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit total vector sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
867
868 /* Register parsing. */
869
870 /* Generic register parser which is called by other specialized
871 register parsers.
872 CCP points to what should be the beginning of a register name.
873 If it is indeed a valid register name, advance CCP over it and
874 return the reg_entry structure; otherwise return NULL.
875 It does not issue diagnostics. */
876
877 static reg_entry *
parse_reg(char ** ccp)878 parse_reg (char **ccp)
879 {
880 char *start = *ccp;
881 char *p;
882 reg_entry *reg;
883
884 #ifdef REGISTER_PREFIX
885 if (*start != REGISTER_PREFIX)
886 return NULL;
887 start++;
888 #endif
889
890 p = start;
891 if (!ISALPHA (*p) || !is_name_beginner (*p))
892 return NULL;
893
894 do
895 p++;
896 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
897
898 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
899
900 if (!reg)
901 return NULL;
902
903 *ccp = p;
904 return reg;
905 }
906
907 /* Return the operand qualifier associated with all uses of REG, or
908 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
909 that qualifiers don't apply to REG or that qualifiers are added
910 using suffixes. */
911
912 static aarch64_opnd_qualifier_t
inherent_reg_qualifier(const reg_entry * reg)913 inherent_reg_qualifier (const reg_entry *reg)
914 {
915 switch (reg->type)
916 {
917 case REG_TYPE_R_32:
918 case REG_TYPE_SP_32:
919 case REG_TYPE_ZR_32:
920 return AARCH64_OPND_QLF_W;
921
922 case REG_TYPE_R_64:
923 case REG_TYPE_SP_64:
924 case REG_TYPE_ZR_64:
925 return AARCH64_OPND_QLF_X;
926
927 case REG_TYPE_FP_B:
928 case REG_TYPE_FP_H:
929 case REG_TYPE_FP_S:
930 case REG_TYPE_FP_D:
931 case REG_TYPE_FP_Q:
932 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
933
934 default:
935 return AARCH64_OPND_QLF_NIL;
936 }
937 }
938
939 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
940 return FALSE. */
941 static bool
aarch64_check_reg_type(const reg_entry * reg,aarch64_reg_type type)942 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
943 {
944 return (reg_type_masks[type] & (1 << reg->type)) != 0;
945 }
946
947 /* Try to parse a base or offset register. Allow SVE base and offset
948 registers if REG_TYPE includes SVE registers. Return the register
949 entry on success, setting *QUALIFIER to the register qualifier.
950 Return null otherwise.
951
952 Note that this function does not issue any diagnostics. */
953
954 static const reg_entry *
aarch64_addr_reg_parse(char ** ccp,aarch64_reg_type reg_type,aarch64_opnd_qualifier_t * qualifier)955 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
956 aarch64_opnd_qualifier_t *qualifier)
957 {
958 char *str = *ccp;
959 const reg_entry *reg = parse_reg (&str);
960
961 if (reg == NULL)
962 return NULL;
963
964 switch (reg->type)
965 {
966 case REG_TYPE_Z:
967 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
968 || str[0] != '.')
969 return NULL;
970 switch (TOLOWER (str[1]))
971 {
972 case 's':
973 *qualifier = AARCH64_OPND_QLF_S_S;
974 break;
975 case 'd':
976 *qualifier = AARCH64_OPND_QLF_S_D;
977 break;
978 default:
979 return NULL;
980 }
981 str += 2;
982 break;
983
984 default:
985 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
986 return NULL;
987 *qualifier = inherent_reg_qualifier (reg);
988 break;
989 }
990
991 *ccp = str;
992
993 return reg;
994 }
995
996 /* Try to parse a base or offset register. Return the register entry
997 on success, setting *QUALIFIER to the register qualifier. Return null
998 otherwise.
999
1000 Note that this function does not issue any diagnostics. */
1001
1002 static const reg_entry *
aarch64_reg_parse_32_64(char ** ccp,aarch64_opnd_qualifier_t * qualifier)1003 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
1004 {
1005 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
1006 }
1007
1008 /* Parse the qualifier of a vector register or vector element of type
1009 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
1010 succeeds; otherwise return FALSE.
1011
1012 Accept only one occurrence of:
1013 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
1014 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only AdvSIMD V registers take a numeric lane count; for anything
     else, or when no digit follows, leave the width as 0 and go
     straight to the element-size letter.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only these lane counts appear in the arrangements listed above.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only accepted without a lane count, or as "1q", and only
	 for V registers.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Reject lane-count/element-size combinations other than full 64-bit
     or 128-bit vectors and the short 2h and 4b arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1094
1095 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1096 *PARSED_TYPE and point *STR at the end of the suffix. */
1097
1098 static bool
parse_predication_for_operand(struct vector_type_el * parsed_type,char ** str)1099 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1100 {
1101 char *ptr = *str;
1102
1103 /* Skip '/'. */
1104 gas_assert (*ptr == '/');
1105 ptr++;
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'z':
1109 parsed_type->type = NT_zero;
1110 break;
1111 case 'm':
1112 parsed_type->type = NT_merge;
1113 break;
1114 default:
1115 if (*ptr != '\0' && *ptr != ',')
1116 first_error_fmt (_("unexpected character `%c' in predication type"),
1117 *ptr);
1118 else
1119 first_error (_("missing predication type"));
1120 return false;
1121 }
1122 parsed_type->width = 0;
1123 *str = ptr + 1;
1124 return true;
1125 }
1126
1127 /* Return true if CH is a valid suffix character for registers of
1128 type TYPE. */
1129
1130 static bool
aarch64_valid_suffix_char_p(aarch64_reg_type type,char ch)1131 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1132 {
1133 switch (type)
1134 {
1135 case REG_TYPE_V:
1136 case REG_TYPE_Z:
1137 case REG_TYPE_ZA:
1138 case REG_TYPE_ZAT:
1139 case REG_TYPE_ZATH:
1140 case REG_TYPE_ZATV:
1141 return ch == '.';
1142
1143 case REG_TYPE_P:
1144 case REG_TYPE_PN:
1145 return ch == '.' || ch == '/';
1146
1147 default:
1148 return false;
1149 }
1150 }
1151
1152 /* Parse an index expression at *STR, storing it in *IMM on success. */
1153
1154 static bool
parse_index_expression(char ** str,int64_t * imm)1155 parse_index_expression (char **str, int64_t *imm)
1156 {
1157 expressionS exp;
1158
1159 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1160 if (exp.X_op != O_constant)
1161 {
1162 first_error (_("constant expression required"));
1163 return false;
1164 }
1165 *imm = exp.X_add_number;
1166 return true;
1167 }
1168
1169 /* Parse a register of the type TYPE.
1170
1171 Return null if the string pointed to by *CCP is not a valid register
1172 name or the parsed register is not of TYPE.
1173
1174 Otherwise return the register, and optionally return the register
1175 shape and element index information in *TYPEINFO.
1176
1177 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.
1178
1179 FLAGS includes PTR_FULL_REG if the function should ignore any potential
1180 register index.
1181
1182 FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
1183 an operand that we can be confident that it is a good match. */
1184
1185 #define PTR_IN_REGLIST (1U << 0)
1186 #define PTR_FULL_REG (1U << 1)
1187 #define PTR_GOOD_MATCH (1U << 2)
1188
static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the input even looked like a name, so that reglist
     diagnostics can distinguish junk from a wrong register.  */
  bool is_alpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start with an empty shape: no type, no width, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!is_alpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* Narrow TYPE from the accepted set to the register's actual type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* A ZA tile name implies a maximum tile number that shrinks as
	     the element size grows; 8 bits per tile-number step.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8U >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* '/': a zeroing/merging predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1321
1322 /* Parse register.
1323
1324 Return the register on success; return null otherwise.
1325
1326 If this is a NEON vector register with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328
1329 This parser does not handle register lists. */
1330
1331 static const reg_entry *
aarch64_reg_parse(char ** ccp,aarch64_reg_type type,struct vector_type_el * vectype)1332 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1333 struct vector_type_el *vectype)
1334 {
1335 return parse_typed_reg (ccp, type, vectype, 0);
1336 }
1337
1338 static inline bool
eq_vector_type_el(struct vector_type_el e1,struct vector_type_el e2)1339 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1340 {
1341 return (e1.type == e2.type
1342 && e1.defined == e2.defined
1343 && e1.width == e2.width
1344 && e1.element_size == e2.element_size
1345 && e1.index == e2.index);
1346 }
1347
1348 /* Return the register number mask for registers of type REG_TYPE. */
1349
1350 static inline int
reg_type_mask(aarch64_reg_type reg_type)1351 reg_type_mask (aarch64_reg_type reg_type)
1352 {
1353 return reg_type == REG_TYPE_P ? 15 : 31;
1354 }
1355
1356 /* This function parses a list of vector registers of type TYPE.
1357 On success, it returns the parsed register list information in the
1358 following encoded format:
1359
1360 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1361 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1362
1363 The information of the register shape and/or index is returned in
1364 *VECTYPE.
1365
1366 It returns PARSE_FAIL if the register list is invalid.
1367
1368 The list contains one to four registers.
1369 Each register can be one of:
1370 <Vt>.<T>[<index>]
1371 <Vt>.<T>
1372 All <T> should be identical.
1373 All <index> should be identical.
1374 There are restrictions on <Vt> numbers which are checked later
1375 (by reg_list_valid_p). */
1376
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1u;
  val_range = -1u;
  in_range = 0;
  mask = reg_type_mask (type);
  do
    {
      /* IN_RANGE is set when the previous loop iteration saw '-'; VAL
	 then still holds the range's first register number.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range of the form "Vn - Vn" is meaningless.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  /* The first register fixes the shape; all later registers must
	     match it exactly.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Emit every register from VAL_RANGE up to VAL (wrapping modulo
	 MASK + 1) into the packed 5-bits-per-register result.  */
      if (! error)
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      /* Having parsed one good register, later failures are certain to
	 be failures of this operand rather than another candidate.  */
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any register carried an index, a single index applying to the
     whole list must follow the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Pack the register numbers above the 2-bit count, as documented in
     the function comment above.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1515
1516 /* Directives: register aliases. */
1517
1518 static reg_entry *
insert_reg_alias(char * str,int number,aarch64_reg_type type)1519 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1520 {
1521 reg_entry *new;
1522 const char *name;
1523
1524 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1525 {
1526 if (new->builtin)
1527 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1528 str);
1529
1530 /* Only warn about a redefinition if it's not defined as the
1531 same register. */
1532 else if (new->number != number || new->type != type)
1533 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1534
1535 return NULL;
1536 }
1537
1538 name = xstrdup (str);
1539 new = XNEW (reg_entry);
1540
1541 new->name = name;
1542 new->number = number;
1543 new->type = type;
1544 new->builtin = false;
1545
1546 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1547
1548 return new;
1549 }
1550
1551 /* Look for the .req directive. This is of the form:
1552
1553 new_register_name .req existing_register_name
1554
1555 If we find one, or if it looks sufficiently like one that we want to
1556 handle any error here, return TRUE. Otherwise return FALSE. */
1557
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return true: the line was a .req, even though it failed.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise only add the lower-case variant if it is distinct.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1630
1631 /* Should never be called, as .req goes between the alias and the
1632 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A .req at the start of a line is always malformed; the directive is
     recognised via create_register_alias instead.  */
  as_bad (_("invalid syntax for .req directive"));
}
1638
1639 /* The .unreq directive deletes an alias which was previously defined
1640 by .req. For example:
1641
1642 my_alias .req r11
1643 .unreq my_alias */
1644
1645 static void
s_unreq(int a ATTRIBUTE_UNUSED)1646 s_unreq (int a ATTRIBUTE_UNUSED)
1647 {
1648 char *name;
1649 char saved_char;
1650
1651 name = input_line_pointer;
1652 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1653 saved_char = *input_line_pointer;
1654 *input_line_pointer = 0;
1655
1656 if (!*name)
1657 as_bad (_("invalid syntax for .unreq directive"));
1658 else
1659 {
1660 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1661
1662 if (!reg)
1663 as_bad (_("unknown register alias '%s'"), name);
1664 else if (reg->builtin)
1665 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1666 name);
1667 else
1668 {
1669 char *p;
1670 char *nbuf;
1671
1672 str_hash_delete (aarch64_reg_hsh, name);
1673 free ((char *) reg->name);
1674 free (reg);
1675
1676 /* Also locate the all upper case and all lower case versions.
1677 Do not complain if we cannot find one or the other as it
1678 was probably deleted above. */
1679
1680 nbuf = strdup (name);
1681 for (p = nbuf; *p; p++)
1682 *p = TOUPPER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 for (p = nbuf; *p; p++)
1692 *p = TOLOWER (*p);
1693 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1694 if (reg)
1695 {
1696 str_hash_delete (aarch64_reg_hsh, nbuf);
1697 free ((char *) reg->name);
1698 free (reg);
1699 }
1700
1701 free (nbuf);
1702 }
1703 }
1704
1705 *input_line_pointer = saved_char;
1706 demand_empty_rest_of_line ();
1707 }
1708
1709 /* Directives: Instruction set selection. */
1710
1711 #if defined OBJ_ELF || defined OBJ_COFF
1712 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1713 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1714 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1715 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1716
1717 /* Create a new mapping symbol for the transition to STATE. */
1718
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name for STATE: $d for data,
     $x for A64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1772
1773 /* We must sometimes convert a region marked as code to data during
1774 code alignment, if an odd number of bytes have to be padded. The
1775 code mapping symbol is pushed to an aligned address. */
1776
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* Keep the first/last bookkeeping consistent: if the removed
	 symbol was also the frag's first mapping symbol, clear that
	 slot too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding itself as data, then resume STATE at the aligned
     address BYTES further on.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1800
1801 static void mapping_state_2 (enum mstate state, int max_chars);
1802
1803 /* Set the mapping state to STATE. Only call this when about to
1804 emit some STATE bytes to the file. */
1805
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Mark everything before this point in the section as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE at the current position.  */
  mapping_state_2 (state, 0);
}
1844
1845 /* Same as mapping_state, but MAX_CHARS bytes have already been
1846 allocated. Put the mapping symbol that far back. */
1847
1848 static void
mapping_state_2(enum mstate state,int max_chars)1849 mapping_state_2 (enum mstate state, int max_chars)
1850 {
1851 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1852
1853 if (!SEG_NORMAL (now_seg))
1854 return;
1855
1856 if (mapstate == state)
1857 /* The mapping symbol has already been emitted.
1858 There is nothing else to do. */
1859 return;
1860
1861 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1862 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1863 }
1864 #else
1865 #define mapping_state(x) /* nothing */
1866 #define mapping_state_2(x, y) /* nothing */
1867 #endif
1868
1869 /* Directives: alignment. */
1870
1871 static void
s_even(int ignore ATTRIBUTE_UNUSED)1872 s_even (int ignore ATTRIBUTE_UNUSED)
1873 {
1874 /* Never make frag if expect extra pass. */
1875 if (!need_pass_2)
1876 frag_align (1, 0, 0);
1877
1878 record_alignment (now_seg, 1);
1879
1880 demand_empty_rest_of_line ();
1881 }
1882
1883 /* Directives: Literal pools. */
1884
1885 static literal_pool *
find_literal_pool(int size)1886 find_literal_pool (int size)
1887 {
1888 literal_pool *pool;
1889
1890 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1891 {
1892 if (pool->section == now_seg
1893 && pool->sub_section == now_subseg && pool->size == size)
1894 break;
1895 }
1896
1897 return pool;
1898 }
1899
1900 static literal_pool *
find_or_make_literal_pool(int size)1901 find_or_make_literal_pool (int size)
1902 {
1903 /* Next literal pool ID number. */
1904 static unsigned int latest_pool_num = 1;
1905 literal_pool *pool;
1906
1907 pool = find_literal_pool (size);
1908
1909 if (pool == NULL)
1910 {
1911 /* Create a new pool. */
1912 pool = XNEW (literal_pool);
1913 if (!pool)
1914 return NULL;
1915
1916 /* Currently we always put the literal pool in the current text
1917 section. If we were generating "small" model code where we
1918 knew that all code and initialised data was within 1MB then
1919 we could output literals to mergeable, read-only data
1920 sections. */
1921
1922 pool->next_free_entry = 0;
1923 pool->section = now_seg;
1924 pool->sub_section = now_subseg;
1925 pool->size = size;
1926 pool->next = list_of_pools;
1927 pool->symbol = NULL;
1928
1929 /* Add it to the list. */
1930 list_of_pools = pool;
1931 }
1932
1933 /* New pools, and emptied pools, will have a NULL symbol. */
1934 if (pool->symbol == NULL)
1935 {
1936 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1937 &zero_address_frag, 0);
1938 pool->id = latest_pool_num++;
1939 }
1940
1941 /* Done. */
1942 return pool;
1943 }
1944
1945 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1946 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) plus addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place into a reference to the pool entry: the pool
     label plus the entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2004
2005 /* Can't use symbol_new here, so have to create a symbol and then at
2006 a later date assign it a value. That's what these functions do. */
2007
static void
symbol_locate (symbolS * symbolP,
	       const char *name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Keep a private copy of NAME on the notes obstack so the caller's
     buffer may be reused.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Run the standard and target-specific new-symbol hooks, as
     symbol_new would have done.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2055
2056
/* Implement the ".ltorg"/".pool" directives: dump every pending literal
   pool (one pool per access size) into the current section.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for 4-, 8- and 16-byte literals (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* NOTE(review): the \002 byte in the generated name presumably
	 prevents collisions with user-written symbols -- confirm.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's symbol to the current location so earlier
	 references to the pool resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  /* The preserved bignum copy is no longer needed.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2115
2116 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2117 /* Forward declarations for functions below, in the MD interface
2118 section. */
2119 static struct reloc_table_entry * find_reloc_table_entry (char **);
2120
2121 /* Directives: Data. */
2122 /* N.B. the support for relocation suffix in this directive needs to be
2123 implemented properly. */
2124
2125 static void
s_aarch64_cons(int nbytes)2126 s_aarch64_cons (int nbytes)
2127 {
2128 expressionS exp;
2129
2130 #ifdef md_flush_pending_output
2131 md_flush_pending_output ();
2132 #endif
2133
2134 if (is_it_end_of_statement ())
2135 {
2136 demand_empty_rest_of_line ();
2137 return;
2138 }
2139
2140 #ifdef md_cons_align
2141 md_cons_align (nbytes);
2142 #endif
2143
2144 mapping_state (MAP_DATA);
2145 do
2146 {
2147 struct reloc_table_entry *reloc;
2148
2149 expression (&exp);
2150
2151 if (exp.X_op != O_symbol)
2152 emit_expr (&exp, (unsigned int) nbytes);
2153 else
2154 {
2155 skip_past_char (&input_line_pointer, '#');
2156 if (skip_past_char (&input_line_pointer, ':'))
2157 {
2158 reloc = find_reloc_table_entry (&input_line_pointer);
2159 if (reloc == NULL)
2160 as_bad (_("unrecognized relocation suffix"));
2161 else
2162 as_bad (_("unimplemented relocation suffix"));
2163 ignore_rest_of_line ();
2164 return;
2165 }
2166 else
2167 emit_expr (&exp, (unsigned int) nbytes);
2168 }
2169 }
2170 while (*input_line_pointer++ == ',');
2171
2172 /* Put terminator back into stream. */
2173 input_line_pointer--;
2174 demand_empty_rest_of_line ();
2175 }
2176 #endif
2177
2178 #ifdef OBJ_ELF
2179 /* Forward declarations for functions below, in the MD interface
2180 section. */
2181 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2182
2183 /* Mark symbol that it follows a variant PCS convention. */
2184
2185 static void
s_variant_pcs(int ignored ATTRIBUTE_UNUSED)2186 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2187 {
2188 char *name;
2189 char c;
2190 symbolS *sym;
2191 asymbol *bfdsym;
2192 elf_symbol_type *elfsym;
2193
2194 c = get_symbol_name (&name);
2195 if (!*name)
2196 as_bad (_("Missing symbol name in directive"));
2197 sym = symbol_find_or_make (name);
2198 restore_line_pointer (c);
2199 demand_empty_rest_of_line ();
2200 bfdsym = symbol_get_bfdsym (sym);
2201 elfsym = elf_symbol_from (bfdsym);
2202 gas_assert (elfsym);
2203 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2204 }
2205 #endif /* OBJ_ELF */
2206
2207 /* Output a 32-bit word, but mark as an instruction. */
2208
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of 32-bit words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* ".inst" with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  /* Emit each comma-separated constant as one instruction word.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  /* NOTE(review): presumably emit_expr writes in target byte
	     order, so pre-swapping keeps the instruction encoding
	     little-endian on big-endian targets -- confirm.  */
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted bytes as instructions for DWARF line info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2263
2264 static void
s_aarch64_cfi_b_key_frame(int ignored ATTRIBUTE_UNUSED)2265 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2266 {
2267 demand_empty_rest_of_line ();
2268 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2269 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2270 }
2271
2272 #ifdef OBJ_ELF
2273 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2274
2275 static void
s_tlsdescadd(int ignored ATTRIBUTE_UNUSED)2276 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2277 {
2278 expressionS exp;
2279
2280 expression (&exp);
2281 frag_grow (4);
2282 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2283 BFD_RELOC_AARCH64_TLSDESC_ADD);
2284
2285 demand_empty_rest_of_line ();
2286 }
2287
2288 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2289
2290 static void
s_tlsdesccall(int ignored ATTRIBUTE_UNUSED)2291 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2292 {
2293 expressionS exp;
2294
2295 /* Since we're just labelling the code, there's no need to define a
2296 mapping symbol. */
2297 expression (&exp);
2298 /* Make sure there is enough room in this frag for the following
2299 blr. This trick only works if the blr follows immediately after
2300 the .tlsdesc directive. */
2301 frag_grow (4);
2302 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2303 BFD_RELOC_AARCH64_TLSDESC_CALL);
2304
2305 demand_empty_rest_of_line ();
2306 }
2307
2308 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2309
2310 static void
s_tlsdescldr(int ignored ATTRIBUTE_UNUSED)2311 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2312 {
2313 expressionS exp;
2314
2315 expression (&exp);
2316 frag_grow (4);
2317 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2318 BFD_RELOC_AARCH64_TLSDESC_LDR);
2319
2320 demand_empty_rest_of_line ();
2321 }
2322 #endif /* OBJ_ELF */
2323
2324 #ifdef TE_PE
2325 static void
s_secrel(int dummy ATTRIBUTE_UNUSED)2326 s_secrel (int dummy ATTRIBUTE_UNUSED)
2327 {
2328 expressionS exp;
2329
2330 do
2331 {
2332 expression (&exp);
2333 if (exp.X_op == O_symbol)
2334 exp.X_op = O_secrel;
2335
2336 emit_expr (&exp, 4);
2337 }
2338 while (*input_line_pointer++ == ',');
2339
2340 input_line_pointer--;
2341 demand_empty_rest_of_line ();
2342 }
2343
2344 void
tc_pe_dwarf2_emit_offset(symbolS * symbol,unsigned int size)2345 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2346 {
2347 expressionS exp;
2348
2349 exp.X_op = O_secrel;
2350 exp.X_add_symbol = symbol;
2351 exp.X_add_number = 0;
2352 emit_expr (&exp, size);
2353 }
2354
2355 static void
s_secidx(int dummy ATTRIBUTE_UNUSED)2356 s_secidx (int dummy ATTRIBUTE_UNUSED)
2357 {
2358 expressionS exp;
2359
2360 do
2361 {
2362 expression (&exp);
2363 if (exp.X_op == O_symbol)
2364 exp.X_op = O_secidx;
2365
2366 emit_expr (&exp, 2);
2367 }
2368 while (*input_line_pointer++ == ',');
2369
2370 input_line_pointer--;
2371 demand_empty_rest_of_line ();
2372 }
2373 #endif /* TE_PE */
2374
2375 static void s_aarch64_arch (int);
2376 static void s_aarch64_cpu (int);
2377 static void s_aarch64_arch_extension (int);
2378
2379 /* This table describes all the machine specific pseudo-ops the assembler
2380 has to support. The fields are:
2381 pseudo-op name without dot
2382 function to call to execute this pseudo-op
2383 Integer arg to pass to the function. */
2384
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* ".word"/".long" emit 4 bytes; ".xword"/".dword" emit 8.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* float_cons is told which format to parse via its argument.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2417
2418
2419 /* Check whether STR points to a register name followed by a comma or the
2420 end of line; REG_TYPE indicates which register types are checked
2421 against. Return TRUE if STR is such a register name; otherwise return
2422 FALSE. The function does not intend to produce any diagnostics, but since
2423 the register parser aarch64_reg_parse, which is called by this function,
2424 does produce diagnostics, we call clear_error to clear any diagnostics
2425 that may be generated by aarch64_reg_parse.
2426 Also, the function returns FALSE directly if there is any user error
2427 present at the function entry. This prevents the existing diagnostics
2428 state from being spoiled.
2429 The function currently serves parse_constant_immediate and
2430 parse_big_immediate only. */
2431 static bool
reg_name_p(char * str,aarch64_reg_type reg_type)2432 reg_name_p (char *str, aarch64_reg_type reg_type)
2433 {
2434 const reg_entry *reg;
2435
2436 /* Prevent the diagnostics state from being spoiled. */
2437 if (error_p ())
2438 return false;
2439
2440 reg = aarch64_reg_parse (&str, reg_type, NULL);
2441
2442 /* Clear the parsing error that may be set by the reg parser. */
2443 clear_error ();
2444
2445 if (!reg)
2446 return false;
2447
2448 skip_whitespace (str);
2449 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2450 return true;
2451
2452 return false;
2453 }
2454
2455 /* Parser functions used exclusively in instruction operands. */
2456
2457 /* Parse an immediate expression which may not be constant.
2458
2459 To prevent the expression parser from pushing a register name
2460 into the symbol table as an undefined symbol, firstly a check is
2461 done to find out whether STR is a register of type REG_TYPE followed
2462 by a comma or the end of line. Return FALSE if STR is such a string. */
2463
2464 static bool
parse_immediate_expression(char ** str,expressionS * exp,aarch64_reg_type reg_type)2465 parse_immediate_expression (char **str, expressionS *exp,
2466 aarch64_reg_type reg_type)
2467 {
2468 if (reg_name_p (*str, reg_type))
2469 {
2470 set_recoverable_error (_("immediate operand required"));
2471 return false;
2472 }
2473
2474 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2475
2476 if (exp->X_op == O_absent)
2477 {
2478 set_fatal_syntax_error (_("missing immediate expression"));
2479 return false;
2480 }
2481
2482 return true;
2483 }
2484
2485 /* Constant immediate-value read function for use in insn parsing.
2486 STR points to the beginning of the immediate (with the optional
2487 leading #); *VAL receives the value. REG_TYPE says which register
2488 names should be treated as registers rather than as symbolic immediates.
2489
2490 Return TRUE on success; otherwise return FALSE. */
2491
2492 static bool
parse_constant_immediate(char ** str,int64_t * val,aarch64_reg_type reg_type)2493 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2494 {
2495 expressionS exp;
2496
2497 if (! parse_immediate_expression (str, &exp, reg_type))
2498 return false;
2499
2500 if (exp.X_op != O_constant)
2501 {
2502 set_syntax_error (_("constant expression required"));
2503 return false;
2504 }
2505
2506 *val = exp.X_add_number;
2507 return true;
2508 }
2509
/* Compress the IEEE754 single-precision pattern IMM into the 8-bit
   AArch64 floating-point immediate encoding.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return low7 | sign;
}
2516
2517 /* Return TRUE if the single-precision floating-point value encoded in IMM
2518 can be expressed in the AArch64 8-bit signed floating-point format with
2519 3-bit exponent and normalized 4 bits of precision; in other words, the
2520 floating-point value must be expressable as
2521 (+/-) n / 16 * power (2, r)
2522 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2523
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  /* Lower 19 bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must all equal the inverse of bit 30.  */
  uint32_t expected = (((imm >> 30) & 0x1) == 0) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2549
2550 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2551 as an IEEE float without any loss of precision. Store the value in
2552 *FPWORD if so. */
2553
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* The 29 mantissa bits that would be discarded must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-62 of the exponent must be the inverse of bit 62.  */
  uint32_t expected = (((high32 >> 30) & 0x1) == 0) ? 0x38000000 : 0x40000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Check Eeee_eeee != 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (low32 >> 29);			/* 3 S bits.  */
  return true;
}
2597
2598 /* Return true if we should treat OPERAND as a double-precision
2599 floating-point operand rather than a single-precision one. */
2600 static bool
double_precision_operand_p(const aarch64_opnd_info * operand)2601 double_precision_operand_p (const aarch64_opnd_info *operand)
2602 {
2603 /* Check for unsuffixed SVE registers, which are allowed
2604 for LDR and STR but not in instructions that require an
2605 immediate. We get better error messages if we arbitrarily
2606 pick one size, parse the immediate normally, and then
2607 report the match failure in the normal way. */
2608 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2609 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2610 }
2611
2612 /* Parse a floating-point immediate. Return TRUE on success and return the
2613 value in *IMMED in the format of IEEE754 single-precision encoding.
2614 *CCP points to the start of the string; DP_P is TRUE when the immediate
2615 is expected to be in double-precision (N.B. this only matters when
2616 hexadecimal representation is involved). REG_TYPE says which register
2617 names should be treated as registers rather than as symbolic immediates.
2618
2619 This routine accepts any IEEE float; it is up to the callers to reject
2620 invalid ones. */
2621
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;	/* Result: single-precision bit pattern.  */
  bool hex_p = false;	/* Set when a 0x... raw encoding was parsed.  */

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern; reject it if precision would
	     be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a bare register name where an immediate is expected.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a textual floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2687
2688 /* Less-generic immediate-value read function with the possibility of loading
2689 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2690 instructions.
2691
2692 To prevent the expression parser from pushing a register name into the
2693 symbol table as an undefined symbol, a check is firstly done to find
2694 out whether STR is a register of type REG_TYPE followed by a comma or
2695 the end of line. Return FALSE if STR is such a register. */
2696
2697 static bool
parse_big_immediate(char ** str,int64_t * imm,aarch64_reg_type reg_type)2698 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2699 {
2700 char *ptr = *str;
2701
2702 if (reg_name_p (ptr, reg_type))
2703 {
2704 set_syntax_error (_("immediate operand required"));
2705 return false;
2706 }
2707
2708 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2709
2710 if (inst.reloc.exp.X_op == O_constant)
2711 *imm = inst.reloc.exp.X_add_number;
2712
2713 *str = ptr;
2714
2715 return true;
2716 }
2717
2718 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2719 if NEED_LIBOPCODES is non-zero, the fixup will need
2720 assistance from the libopcodes. */
2721
2722 static inline void
aarch64_set_gas_internal_fixup(struct reloc * reloc,const aarch64_opnd_info * operand,int need_libopcodes_p)2723 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2724 const aarch64_opnd_info *operand,
2725 int need_libopcodes_p)
2726 {
2727 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2728 reloc->opnd = operand->type;
2729 if (need_libopcodes_p)
2730 reloc->need_libopcodes_p = 1;
2731 };
2732
2733 /* Return TRUE if the instruction needs to be fixed up later internally by
2734 the GAS; otherwise return FALSE. */
2735
static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* This reloc type is installed by aarch64_set_gas_internal_fixup.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2741
2742 /* Assign the immediate value to the relevant field in *OPERAND if
2743 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2744 needs an internal fixup in a later stage.
2745 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2746 IMM.VALUE that may get assigned with the constant. */
2747 static inline void
assign_imm_if_const_or_fixup_later(struct reloc * reloc,aarch64_opnd_info * operand,int addr_off_p,int need_libopcodes_p,int skip_p)2748 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2749 aarch64_opnd_info *operand,
2750 int addr_off_p,
2751 int need_libopcodes_p,
2752 int skip_p)
2753 {
2754 if (reloc->exp.X_op == O_constant)
2755 {
2756 if (addr_off_p)
2757 operand->addr.offset.imm = reloc->exp.X_add_number;
2758 else
2759 operand->imm.value = reloc->exp.X_add_number;
2760 reloc->type = BFD_RELOC_UNUSED;
2761 }
2762 else
2763 {
2764 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2765 /* Tell libopcodes to ignore this operand or not. This is helpful
2766 when one of the operands needs to be fixed up later but we need
2767 libopcodes to check the other operands. */
2768 operand->skip = skip_p;
2769 }
2770 }
2771
2772 /* Relocation modifiers. Each entry in the table contains the textual
2773 name for the relocation which may be placed before a symbol used as
2774 a load/store offset, or add immediate. It must be surrounded by a
2775 leading and trailing colon, for example:
2776
2777 ldr x0, [x1, #:rello:varsym]
2778 add x0, x1, #:rello:varsym */
2779
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero for PC-relative modifiers.  */
  bfd_reloc_code_real_type adr_type;		/* Reloc for ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* Reloc for ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* Reloc for MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* Reloc for ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Reloc for LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc for LDR (literal).  */
};
2791
2792 static struct reloc_table_entry reloc_table[] =
2793 {
2794 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2795 {"lo12", 0,
2796 0, /* adr_type */
2797 0,
2798 0,
2799 BFD_RELOC_AARCH64_ADD_LO12,
2800 BFD_RELOC_AARCH64_LDST_LO12,
2801 0},
2802
2803 /* Higher 21 bits of pc-relative page offset: ADRP */
2804 {"pg_hi21", 1,
2805 0, /* adr_type */
2806 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2807 0,
2808 0,
2809 0,
2810 0},
2811
2812 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2813 {"pg_hi21_nc", 1,
2814 0, /* adr_type */
2815 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2816 0,
2817 0,
2818 0,
2819 0},
2820
2821 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2822 {"abs_g0", 0,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_MOVW_G0,
2826 0,
2827 0,
2828 0},
2829
2830 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2831 {"abs_g0_s", 0,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_MOVW_G0_S,
2835 0,
2836 0,
2837 0},
2838
2839 /* Less significant bits 0-15 of address/value: MOVK, no check */
2840 {"abs_g0_nc", 0,
2841 0, /* adr_type */
2842 0,
2843 BFD_RELOC_AARCH64_MOVW_G0_NC,
2844 0,
2845 0,
2846 0},
2847
2848 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2849 {"abs_g1", 0,
2850 0, /* adr_type */
2851 0,
2852 BFD_RELOC_AARCH64_MOVW_G1,
2853 0,
2854 0,
2855 0},
2856
2857 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2858 {"abs_g1_s", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_MOVW_G1_S,
2862 0,
2863 0,
2864 0},
2865
2866 /* Less significant bits 16-31 of address/value: MOVK, no check */
2867 {"abs_g1_nc", 0,
2868 0, /* adr_type */
2869 0,
2870 BFD_RELOC_AARCH64_MOVW_G1_NC,
2871 0,
2872 0,
2873 0},
2874
2875 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2876 {"abs_g2", 0,
2877 0, /* adr_type */
2878 0,
2879 BFD_RELOC_AARCH64_MOVW_G2,
2880 0,
2881 0,
2882 0},
2883
2884 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2885 {"abs_g2_s", 0,
2886 0, /* adr_type */
2887 0,
2888 BFD_RELOC_AARCH64_MOVW_G2_S,
2889 0,
2890 0,
2891 0},
2892
2893 /* Less significant bits 32-47 of address/value: MOVK, no check */
2894 {"abs_g2_nc", 0,
2895 0, /* adr_type */
2896 0,
2897 BFD_RELOC_AARCH64_MOVW_G2_NC,
2898 0,
2899 0,
2900 0},
2901
2902 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2903 {"abs_g3", 0,
2904 0, /* adr_type */
2905 0,
2906 BFD_RELOC_AARCH64_MOVW_G3,
2907 0,
2908 0,
2909 0},
2910
2911 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2912 {"prel_g0", 1,
2913 0, /* adr_type */
2914 0,
2915 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2916 0,
2917 0,
2918 0},
2919
2920 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2921 {"prel_g0_nc", 1,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2930 {"prel_g1", 1,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2934 0,
2935 0,
2936 0},
2937
2938 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2939 {"prel_g1_nc", 1,
2940 0, /* adr_type */
2941 0,
2942 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2943 0,
2944 0,
2945 0},
2946
2947 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2948 {"prel_g2", 1,
2949 0, /* adr_type */
2950 0,
2951 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2952 0,
2953 0,
2954 0},
2955
2956 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2957 {"prel_g2_nc", 1,
2958 0, /* adr_type */
2959 0,
2960 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2961 0,
2962 0,
2963 0},
2964
2965 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2966 {"prel_g3", 1,
2967 0, /* adr_type */
2968 0,
2969 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2970 0,
2971 0,
2972 0},
2973
2974 /* Get to the page containing GOT entry for a symbol. */
2975 {"got", 1,
2976 0, /* adr_type */
2977 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2978 0,
2979 0,
2980 0,
2981 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2982
2983 /* 12 bit offset into the page containing GOT entry for that symbol. */
2984 {"got_lo12", 0,
2985 0, /* adr_type */
2986 0,
2987 0,
2988 0,
2989 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2990 0},
2991
2992 /* 0-15 bits of address/value: MOVk, no check. */
2993 {"gotoff_g0_nc", 0,
2994 0, /* adr_type */
2995 0,
2996 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2997 0,
2998 0,
2999 0},
3000
3001 /* Most significant bits 16-31 of address/value: MOVZ. */
3002 {"gotoff_g1", 0,
3003 0, /* adr_type */
3004 0,
3005 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
3006 0,
3007 0,
3008 0},
3009
3010 /* 15 bit offset into the page containing GOT entry for that symbol. */
3011 {"gotoff_lo15", 0,
3012 0, /* adr_type */
3013 0,
3014 0,
3015 0,
3016 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
3017 0},
3018
3019 /* Get to the page containing GOT TLS entry for a symbol */
3020 {"gottprel_g0_nc", 0,
3021 0, /* adr_type */
3022 0,
3023 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3024 0,
3025 0,
3026 0},
3027
3028 /* Get to the page containing GOT TLS entry for a symbol */
3029 {"gottprel_g1", 0,
3030 0, /* adr_type */
3031 0,
3032 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3033 0,
3034 0,
3035 0},
3036
3037 /* Get to the page containing GOT TLS entry for a symbol */
3038 {"tlsgd", 0,
3039 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3040 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3041 0,
3042 0,
3043 0,
3044 0},
3045
3046 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3047 {"tlsgd_lo12", 0,
3048 0, /* adr_type */
3049 0,
3050 0,
3051 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3052 0,
3053 0},
3054
3055 /* Lower 16 bits address/value: MOVk. */
3056 {"tlsgd_g0_nc", 0,
3057 0, /* adr_type */
3058 0,
3059 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3060 0,
3061 0,
3062 0},
3063
3064 /* Most significant bits 16-31 of address/value: MOVZ. */
3065 {"tlsgd_g1", 0,
3066 0, /* adr_type */
3067 0,
3068 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3069 0,
3070 0,
3071 0},
3072
3073 /* Get to the page containing GOT TLS entry for a symbol */
3074 {"tlsdesc", 0,
3075 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3076 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3077 0,
3078 0,
3079 0,
3080 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3081
3082 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3083 {"tlsdesc_lo12", 0,
3084 0, /* adr_type */
3085 0,
3086 0,
3087 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3088 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3089 0},
3090
3091 /* Get to the page containing GOT TLS entry for a symbol.
3092 The same as GD, we allocate two consecutive GOT slots
3093 for module index and module offset, the only difference
3094 with GD is the module offset should be initialized to
3095 zero without any outstanding runtime relocation. */
3096 {"tlsldm", 0,
3097 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3098 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3099 0,
3100 0,
3101 0,
3102 0},
3103
3104 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3105 {"tlsldm_lo12_nc", 0,
3106 0, /* adr_type */
3107 0,
3108 0,
3109 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3110 0,
3111 0},
3112
3113 /* 12 bit offset into the module TLS base address. */
3114 {"dtprel_lo12", 0,
3115 0, /* adr_type */
3116 0,
3117 0,
3118 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3119 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3120 0},
3121
3122 /* Same as dtprel_lo12, no overflow check. */
3123 {"dtprel_lo12_nc", 0,
3124 0, /* adr_type */
3125 0,
3126 0,
3127 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3128 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3129 0},
3130
3131 /* bits[23:12] of offset to the module TLS base address. */
3132 {"dtprel_hi12", 0,
3133 0, /* adr_type */
3134 0,
3135 0,
3136 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3137 0,
3138 0},
3139
3140 /* bits[15:0] of offset to the module TLS base address. */
3141 {"dtprel_g0", 0,
3142 0, /* adr_type */
3143 0,
3144 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3145 0,
3146 0,
3147 0},
3148
3149 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3150 {"dtprel_g0_nc", 0,
3151 0, /* adr_type */
3152 0,
3153 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3154 0,
3155 0,
3156 0},
3157
3158 /* bits[31:16] of offset to the module TLS base address. */
3159 {"dtprel_g1", 0,
3160 0, /* adr_type */
3161 0,
3162 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3163 0,
3164 0,
3165 0},
3166
3167 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3168 {"dtprel_g1_nc", 0,
3169 0, /* adr_type */
3170 0,
3171 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3172 0,
3173 0,
3174 0},
3175
3176 /* bits[47:32] of offset to the module TLS base address. */
3177 {"dtprel_g2", 0,
3178 0, /* adr_type */
3179 0,
3180 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3181 0,
3182 0,
3183 0},
3184
3185 /* Lower 16 bit offset into GOT entry for a symbol */
3186 {"tlsdesc_off_g0_nc", 0,
3187 0, /* adr_type */
3188 0,
3189 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3190 0,
3191 0,
3192 0},
3193
3194 /* Higher 16 bit offset into GOT entry for a symbol */
3195 {"tlsdesc_off_g1", 0,
3196 0, /* adr_type */
3197 0,
3198 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3199 0,
3200 0,
3201 0},
3202
3203 /* Get to the page containing GOT TLS entry for a symbol */
3204 {"gottprel", 0,
3205 0, /* adr_type */
3206 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3207 0,
3208 0,
3209 0,
3210 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3211
3212 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3213 {"gottprel_lo12", 0,
3214 0, /* adr_type */
3215 0,
3216 0,
3217 0,
3218 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3219 0},
3220
3221 /* Get tp offset for a symbol. */
3222 {"tprel", 0,
3223 0, /* adr_type */
3224 0,
3225 0,
3226 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3227 0,
3228 0},
3229
3230 /* Get tp offset for a symbol. */
3231 {"tprel_lo12", 0,
3232 0, /* adr_type */
3233 0,
3234 0,
3235 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3236 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3237 0},
3238
3239 /* Get tp offset for a symbol. */
3240 {"tprel_hi12", 0,
3241 0, /* adr_type */
3242 0,
3243 0,
3244 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3245 0,
3246 0},
3247
3248 /* Get tp offset for a symbol. */
3249 {"tprel_lo12_nc", 0,
3250 0, /* adr_type */
3251 0,
3252 0,
3253 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3254 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3255 0},
3256
3257 /* Most significant bits 32-47 of address/value: MOVZ. */
3258 {"tprel_g2", 0,
3259 0, /* adr_type */
3260 0,
3261 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3262 0,
3263 0,
3264 0},
3265
3266 /* Most significant bits 16-31 of address/value: MOVZ. */
3267 {"tprel_g1", 0,
3268 0, /* adr_type */
3269 0,
3270 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3271 0,
3272 0,
3273 0},
3274
3275 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3276 {"tprel_g1_nc", 0,
3277 0, /* adr_type */
3278 0,
3279 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3280 0,
3281 0,
3282 0},
3283
3284 /* Most significant bits 0-15 of address/value: MOVZ. */
3285 {"tprel_g0", 0,
3286 0, /* adr_type */
3287 0,
3288 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3289 0,
3290 0,
3291 0},
3292
3293 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3294 {"tprel_g0_nc", 0,
3295 0, /* adr_type */
3296 0,
3297 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3298 0,
3299 0,
3300 0},
3301
3302 /* 15bit offset from got entry to base address of GOT table. */
3303 {"gotpage_lo15", 0,
3304 0,
3305 0,
3306 0,
3307 0,
3308 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3309 0},
3310
3311 /* 14bit offset from got entry to base address of GOT table. */
3312 {"gotpage_lo14", 0,
3313 0,
3314 0,
3315 0,
3316 0,
3317 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3318 0},
3319 };
3320
3321 /* Given the address of a pointer pointing to the textual name of a
3322 relocation as may appear in assembler source, attempt to find its
3323 details in reloc_table. The pointer will be updated to the character
3324 after the trailing colon. On failure, NULL will be returned;
3325 otherwise return the reloc_table_entry. */
3326
3327 static struct reloc_table_entry *
find_reloc_table_entry(char ** str)3328 find_reloc_table_entry (char **str)
3329 {
3330 unsigned int i;
3331 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3332 {
3333 int length = strlen (reloc_table[i].name);
3334
3335 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3336 && (*str)[length] == ':')
3337 {
3338 *str += (length + 1);
3339 return &reloc_table[i];
3340 }
3341 }
3342
3343 return NULL;
3344 }
3345
/* Classify relocation TYPE for the purpose of deciding whether a fixup
   against it must be emitted as a relocation.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* GOT, TLS and page-relative relocations can only be resolved at
	 static or dynamic link time.  Always leave these relocations
	 for the linker.  */
      return 1;

    default:
      /* No opinion; the caller decides (typically via
	 generic_force_reloc).  */
      return -1;
    }
}
3448
3449 int
aarch64_force_relocation(struct fix * fixp)3450 aarch64_force_relocation (struct fix *fixp)
3451 {
3452 int res = aarch64_force_reloc (fixp->fx_r_type);
3453
3454 if (res == -1)
3455 return generic_force_reloc (fixp);
3456 return res;
3457 }
3458
/* Mode argument to parse_shift and parse_shifter_operand.  Selects
   which shift/extend operators are syntactically acceptable in the
   current operand context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_MUL,			/* bare "mul #n" (SVE) */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiple) */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
3473
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which operators are acceptable; the result (kind and
   amount) is written to OPERAND->shifter and *STR is advanced past the
   parsed text.  Return TRUE on success; otherwise return FALSE with a
   syntax error recorded.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator mnemonic (e.g. "lsl", "uxtw").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the mnemonic up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful in the LSL|MSL context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only meaningful in the SVE MUL #n / MUL VL contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Validate the operator KIND against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  An amount is absent for "MUL VL" and for a
     register-offset shifter immediately followed by ']'.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for a bare extend operator
	 (e.g. "sxtw" with no "#n"); it is never acceptable after an
	 explicit immediate prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3646
3647 /* Parse a <shifter_operand> for a data processing instruction:
3648
3649 #<immediate>
3650 #<immediate>, LSL #imm
3651
3652 Validation of immediate operands is deferred to md_apply_fix.
3653
3654 Return TRUE on success; otherwise return FALSE. */
3655
3656 static bool
parse_shifter_operand_imm(char ** str,aarch64_opnd_info * operand,enum parse_shift_mode mode)3657 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3658 enum parse_shift_mode mode)
3659 {
3660 char *p;
3661
3662 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3663 return false;
3664
3665 p = *str;
3666
3667 /* Accept an immediate expression. */
3668 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3669 REJECT_ABSENT))
3670 return false;
3671
3672 /* Accept optional LSL for arithmetic immediate values. */
3673 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3674 if (! parse_shift (&p, operand, SHIFTED_LSL))
3675 return false;
3676
3677 /* Not accept any shifter for logical immediate values. */
3678 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3679 && parse_shift (&p, operand, mode))
3680 {
3681 set_syntax_error (_("unexpected shift operator"));
3682 return false;
3683 }
3684
3685 *str = p;
3686 return true;
3687 }
3688
3689 /* Parse a <shifter_operand> for a data processing instruction:
3690
3691 <Rm>
3692 <Rm>, <shift>
3693 #<immediate>
3694 #<immediate>, LSL #imm
3695
3696 where <shift> is handled by parse_shift above, and the last two
3697 cases are handled by the function above.
3698
3699 Validation of immediate operands is deferred to md_apply_fix.
3700
3701 Return TRUE on success; otherwise return FALSE. */
3702
3703 static bool
parse_shifter_operand(char ** str,aarch64_opnd_info * operand,enum parse_shift_mode mode)3704 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3705 enum parse_shift_mode mode)
3706 {
3707 const reg_entry *reg;
3708 aarch64_opnd_qualifier_t qualifier;
3709 enum aarch64_operand_class opd_class
3710 = aarch64_get_operand_class (operand->type);
3711
3712 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3713 if (reg)
3714 {
3715 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3716 {
3717 set_syntax_error (_("unexpected register in the immediate operand"));
3718 return false;
3719 }
3720
3721 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3722 {
3723 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3724 return false;
3725 }
3726
3727 operand->reg.regno = reg->number;
3728 operand->qualifier = qualifier;
3729
3730 /* Accept optional shift operation on register. */
3731 if (! skip_past_comma (str))
3732 return true;
3733
3734 if (! parse_shift (str, operand, mode))
3735 return false;
3736
3737 return true;
3738 }
3739 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3740 {
3741 set_syntax_error
3742 (_("integer register expected in the extended/shifted operand "
3743 "register"));
3744 return false;
3745 }
3746
3747 /* We have a shifted immediate variable. */
3748 return parse_shifter_operand_imm (str, operand, mode);
3749 }
3750
3751 /* Return TRUE on success; return FALSE otherwise. */
3752
3753 static bool
parse_shifter_operand_reloc(char ** str,aarch64_opnd_info * operand,enum parse_shift_mode mode)3754 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3755 enum parse_shift_mode mode)
3756 {
3757 char *p = *str;
3758
3759 /* Determine if we have the sequence of characters #: or just :
3760 coming next. If we do, then we check for a :rello: relocation
3761 modifier. If we don't, punt the whole lot to
3762 parse_shifter_operand. */
3763
3764 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3765 {
3766 struct reloc_table_entry *entry;
3767
3768 if (p[0] == '#')
3769 p += 2;
3770 else
3771 p++;
3772 *str = p;
3773
3774 /* Try to parse a relocation. Anything else is an error. */
3775 if (!(entry = find_reloc_table_entry (str)))
3776 {
3777 set_syntax_error (_("unknown relocation modifier"));
3778 return false;
3779 }
3780
3781 if (entry->add_type == 0)
3782 {
3783 set_syntax_error
3784 (_("this relocation modifier is not allowed on this instruction"));
3785 return false;
3786 }
3787
3788 /* Save str before we decompose it. */
3789 p = *str;
3790
3791 /* Next, we parse the expression. */
3792 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3793 REJECT_ABSENT))
3794 return false;
3795
3796 /* Record the relocation type (use the ADD variant here). */
3797 inst.reloc.type = entry->add_type;
3798 inst.reloc.pc_rel = entry->pc_rel;
3799
3800 /* If str is empty, we've reached the end, stop here. */
3801 if (**str == '\0')
3802 return true;
3803
3804 /* Otherwise, we have a shifted reloc modifier, so rewind to
3805 recover the variable name and continue parsing for the shifter. */
3806 *str = p;
3807 return parse_shifter_operand_imm (str, operand, mode);
3808 }
3809
3810 return parse_shifter_operand (str, operand, mode);
3811 }
3812
3813 /* Parse all forms of an address expression. Information is written
3814 to *OPERAND and/or inst.reloc.
3815
3816 The A64 instruction set has the following addressing modes:
3817
3818 Offset
3819 [base] // in SIMD ld/st structure
3820 [base{,#0}] // in ld/st exclusive
3821 [base{,#imm}]
3822 [base,Xm{,LSL #imm}]
3823 [base,Xm,SXTX {#imm}]
3824 [base,Wm,(S|U)XTW {#imm}]
3825 Pre-indexed
3826 [base]! // in ldraa/ldrab exclusive
3827 [base,#imm]!
3828 Post-indexed
3829 [base],#imm
3830 [base],Xm // in SIMD ld/st structure
3831 PC-relative (literal)
3832 label
3833 SVE:
3834 [base,#imm,MUL VL]
3835 [base,Zm.D{,LSL #imm}]
3836 [base,Zm.S,(S|U)XTW {#imm}]
3837 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3838 [Zn.S,#imm]
3839 [Zn.D,#imm]
3840 [Zn.S{, Xm}]
3841 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3842 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3843 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3844
3845 (As a convenience, the notation "=immediate" is permitted in conjunction
3846 with the pc-relative literal load instructions to automatically place an
3847 immediate value or symbolic address in a nearby literal pool and generate
3848 a hidden label which references it.)
3849
3850 Upon a successful parsing, the address structure in *OPERAND will be
3851 filled in the following way:
3852
3853 .base_regno = <base>
3854 .offset.is_reg // 1 if the offset is a register
3855 .offset.imm = <imm>
3856 .offset.regno = <Rm>
3857
3858 For different addressing modes defined in the A64 ISA:
3859
3860 Offset
3861 .pcrel=0; .preind=1; .postind=0; .writeback=0
3862 Pre-indexed
3863 .pcrel=0; .preind=1; .postind=0; .writeback=1
3864 Post-indexed
3865 .pcrel=0; .preind=0; .postind=1; .writeback=1
3866 PC-relative (literal)
3867 .pcrel=1; .preind=1; .postind=0; .writeback=0
3868
3869 The shift/extension information, if any, will be stored in .shifter.
3870 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3871 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3872 corresponding register.
3873
3874 BASE_TYPE says which types of base register should be accepted and
3875 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3876 is the type of shifter that is allowed for immediate offsets,
3877 or SHIFTED_NONE if none.
3878
3879 In all other respects, it is the caller's responsibility to check
3880 for addressing modes not supported by the instruction, and to set
3881 inst.reloc.type. */
3882
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  /* No base/offset register seen yet.  */
  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol> */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant appropriate for the consuming
	     instruction: ADR uses the adr flavour, everything else the
	     load-literal flavour.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>: */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr> */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base position looked like a register name at
     all, to pick the most helpful diagnostic below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (X) offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The extending shifters (S|U)XTW require a W register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]! */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]! */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4189
4190 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4191 on success. */
4192 static bool
parse_address(char ** str,aarch64_opnd_info * operand)4193 parse_address (char **str, aarch64_opnd_info *operand)
4194 {
4195 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4196 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4197 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4198 }
4199
4200 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4201 The arguments have the same meaning as for parse_address_main.
4202 Return TRUE on success. */
4203 static bool
parse_sve_address(char ** str,aarch64_opnd_info * operand,aarch64_opnd_qualifier_t * base_qualifier,aarch64_opnd_qualifier_t * offset_qualifier)4204 parse_sve_address (char **str, aarch64_opnd_info *operand,
4205 aarch64_opnd_qualifier_t *base_qualifier,
4206 aarch64_opnd_qualifier_t *offset_qualifier)
4207 {
4208 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4209 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4210 SHIFTED_MUL_VL);
4211 }
4212
4213 /* Parse a register X0-X30. The register must be 64-bit and register 31
4214 is unallocated. */
4215 static bool
parse_x0_to_x30(char ** str,aarch64_opnd_info * operand)4216 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4217 {
4218 const reg_entry *reg = parse_reg (str);
4219 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4220 {
4221 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4222 return false;
4223 }
4224 operand->reg.regno = reg->number;
4225 operand->qualifier = AARCH64_OPND_QLF_X;
4226 return true;
4227 }
4228
4229 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4230 Return TRUE on success; otherwise return FALSE. */
4231 static bool
parse_half(char ** str,int * internal_fixup_p)4232 parse_half (char **str, int *internal_fixup_p)
4233 {
4234 char *p = *str;
4235
4236 skip_past_char (&p, '#');
4237
4238 gas_assert (internal_fixup_p);
4239 *internal_fixup_p = 0;
4240
4241 if (*p == ':')
4242 {
4243 struct reloc_table_entry *entry;
4244
4245 /* Try to parse a relocation. Anything else is an error. */
4246 ++p;
4247
4248 if (!(entry = find_reloc_table_entry (&p)))
4249 {
4250 set_syntax_error (_("unknown relocation modifier"));
4251 return false;
4252 }
4253
4254 if (entry->movw_type == 0)
4255 {
4256 set_syntax_error
4257 (_("this relocation modifier is not allowed on this instruction"));
4258 return false;
4259 }
4260
4261 inst.reloc.type = entry->movw_type;
4262 }
4263 else
4264 *internal_fixup_p = 1;
4265
4266 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4267 return false;
4268
4269 *str = p;
4270 return true;
4271 }
4272
4273 /* Parse an operand for an ADRP instruction:
4274 ADRP <Xd>, <label>
4275 Return TRUE on success; otherwise return FALSE. */
4276
4277 static bool
parse_adrp(char ** str)4278 parse_adrp (char **str)
4279 {
4280 char *p;
4281
4282 p = *str;
4283 if (*p == ':')
4284 {
4285 struct reloc_table_entry *entry;
4286
4287 /* Try to parse a relocation. Anything else is an error. */
4288 ++p;
4289 if (!(entry = find_reloc_table_entry (&p)))
4290 {
4291 set_syntax_error (_("unknown relocation modifier"));
4292 return false;
4293 }
4294
4295 if (entry->adrp_type == 0)
4296 {
4297 set_syntax_error
4298 (_("this relocation modifier is not allowed on this instruction"));
4299 return false;
4300 }
4301
4302 inst.reloc.type = entry->adrp_type;
4303 }
4304 else
4305 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4306
4307 inst.reloc.pc_rel = 1;
4308 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4309 return false;
4310 *str = p;
4311 return true;
4312 }
4313
4314 /* Miscellaneous. */
4315
4316 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4317 of SIZE tokens in which index I gives the token for field value I,
4318 or is null if field value I is invalid. If the symbolic operand
4319 can also be given as a 0-based integer, REG_TYPE says which register
4320 names should be treated as registers rather than as symbolic immediates
4321 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4322
4323 Return true on success, moving *STR past the operand and storing the
4324 field value in *VAL. */
4325
4326 static int
parse_enum_string(char ** str,int64_t * val,const char * const * array,size_t size,aarch64_reg_type reg_type)4327 parse_enum_string (char **str, int64_t *val, const char *const *array,
4328 size_t size, aarch64_reg_type reg_type)
4329 {
4330 expressionS exp;
4331 char *p, *q;
4332 size_t i;
4333
4334 /* Match C-like tokens. */
4335 p = q = *str;
4336 while (ISALNUM (*q))
4337 q++;
4338
4339 for (i = 0; i < size; ++i)
4340 if (array[i]
4341 && strncasecmp (array[i], p, q - p) == 0
4342 && array[i][q - p] == 0)
4343 {
4344 *val = i;
4345 *str = q;
4346 return true;
4347 }
4348
4349 if (reg_type == REG_TYPE_MAX)
4350 return false;
4351
4352 if (!parse_immediate_expression (&p, &exp, reg_type))
4353 return false;
4354
4355 if (exp.X_op == O_constant
4356 && (uint64_t) exp.X_add_number < size)
4357 {
4358 *val = exp.X_add_number;
4359 *str = p;
4360 return true;
4361 }
4362
4363 /* Use the default error for this operand. */
4364 return false;
4365 }
4366
4367 /* Parse an option for a preload instruction. Returns the encoding for the
4368 option, or PARSE_FAIL. */
4369
4370 static int
parse_pldop(char ** str)4371 parse_pldop (char **str)
4372 {
4373 char *p, *q;
4374 const struct aarch64_name_value_pair *o;
4375
4376 p = q = *str;
4377 while (ISALNUM (*q))
4378 q++;
4379
4380 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4381 if (!o)
4382 return PARSE_FAIL;
4383
4384 *str = q;
4385 return o->value;
4386 }
4387
4388 /* Parse an option for a barrier instruction. Returns the encoding for the
4389 option, or PARSE_FAIL. */
4390
4391 static int
parse_barrier(char ** str)4392 parse_barrier (char **str)
4393 {
4394 char *p, *q;
4395 const struct aarch64_name_value_pair *o;
4396
4397 p = q = *str;
4398 while (ISALPHA (*q))
4399 q++;
4400
4401 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4402 if (!o)
4403 return PARSE_FAIL;
4404
4405 *str = q;
4406 return o->value;
4407 }
4408
4409 /* Parse an option for barrier, bti and guarded control stack data
4410 synchronization instructions. Return true on matching the target
4411 options else return false. */
4412
4413 static bool
parse_hint_opt(const char * name,char ** str,const struct aarch64_name_value_pair ** hint_opt)4414 parse_hint_opt (const char *name, char **str,
4415 const struct aarch64_name_value_pair ** hint_opt)
4416 {
4417 char *p, *q;
4418 const struct aarch64_name_value_pair *o;
4419
4420 p = q = *str;
4421 while (ISALPHA (*q))
4422 q++;
4423
4424 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4425 if (!o)
4426 return false;
4427
4428 if ((strcmp ("gcsb", name) == 0 && o->value != HINT_OPD_DSYNC)
4429 || ((strcmp ("psb", name) == 0 || strcmp ("tsb", name) == 0)
4430 && o->value != HINT_OPD_CSYNC)
4431 || ((strcmp ("bti", name) == 0)
4432 && (o->value != HINT_OPD_C && o->value != HINT_OPD_J
4433 && o->value != HINT_OPD_JC)))
4434 return false;
4435
4436 *str = q;
4437 *hint_opt = o;
4438 return true;
4439 }
4440
4441 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4442 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4443 on failure. Format:
4444
4445 REG_TYPE.QUALIFIER
4446
4447 Side effect: Update STR with current parse position of success.
4448
4449 FLAGS is as for parse_typed_reg. */
4450
4451 static const reg_entry *
parse_reg_with_qual(char ** str,aarch64_reg_type reg_type,aarch64_opnd_qualifier_t * qualifier,unsigned int flags)4452 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4453 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4454 {
4455 struct vector_type_el vectype;
4456 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4457 PTR_FULL_REG | flags);
4458 if (!reg)
4459 return NULL;
4460
4461 if (vectype.type == NT_invtype)
4462 *qualifier = AARCH64_OPND_QLF_NIL;
4463 else
4464 {
4465 *qualifier = vectype_to_qualifier (&vectype);
4466 if (*qualifier == AARCH64_OPND_QLF_NIL)
4467 return NULL;
4468 }
4469
4470 return reg;
4471 }
4472
4473 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4474
4475 #<imm>
4476 <imm>
4477
4478 Function return TRUE if immediate was found, or FALSE.
4479 */
4480 static bool
parse_sme_immediate(char ** str,int64_t * imm)4481 parse_sme_immediate (char **str, int64_t *imm)
4482 {
4483 int64_t val;
4484 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4485 return false;
4486
4487 *imm = val;
4488 return true;
4489 }
4490
/* Parse index with selection register and immediate offset:

     [<Wv>, <imm>]
     [<Wv>, #<imm>]
     [<Ws>, <offsf>:<offsl>]

   Return true on success, populating OPND with the parsed index.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  /* The first (or only) immediate offset.  */
  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* An optional ":<offsl>" turns the offset into a range
     <offsf>:<offsl>, which must be strictly ascending.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      /* Stored as the range length minus one.  */
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* An optional trailing vector group size, "vgx2" or "vgx4"; the
     !ISALPHA check rejects longer identifiers with that prefix.  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4582
4583 /* Parse a register of type REG_TYPE that might have an element type
4584 qualifier and that is indexed by two values: a 32-bit register,
4585 followed by an immediate. The ranges of the register and the
4586 immediate vary by opcode and are checked in libopcodes.
4587
4588 Return true on success, populating OPND with information about
4589 the operand and setting QUALIFIER to the register qualifier.
4590
4591 Field format examples:
4592
4593 <Pm>.<T>[<Wv>< #<imm>]
4594 ZA[<Wv>, #<imm>]
4595 <ZAn><HV>.<T>[<Wv>, #<imm>]
4596 <ZAn><HV>.<T>[<Ws>, <offsf>:<offsl>]
4597
4598 FLAGS is as for parse_typed_reg. */
4599
4600 static bool
parse_dual_indexed_reg(char ** str,aarch64_reg_type reg_type,struct aarch64_indexed_za * opnd,aarch64_opnd_qualifier_t * qualifier,unsigned int flags)4601 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4602 struct aarch64_indexed_za *opnd,
4603 aarch64_opnd_qualifier_t *qualifier,
4604 unsigned int flags)
4605 {
4606 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4607 if (!reg)
4608 return false;
4609
4610 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4611 opnd->regno = reg->number;
4612
4613 return parse_sme_za_index (str, opnd);
4614 }
4615
4616 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4617 operand. */
4618
4619 static bool
parse_sme_za_hv_tiles_operand_with_braces(char ** str,struct aarch64_indexed_za * opnd,aarch64_opnd_qualifier_t * qualifier)4620 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4621 struct aarch64_indexed_za *opnd,
4622 aarch64_opnd_qualifier_t *qualifier)
4623 {
4624 if (!skip_past_char (str, '{'))
4625 {
4626 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4627 return false;
4628 }
4629
4630 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4631 PTR_IN_REGLIST))
4632 return false;
4633
4634 if (!skip_past_char (str, '}'))
4635 {
4636 set_syntax_error (_("expected '}'"));
4637 return false;
4638 }
4639
4640 return true;
4641 }
4642
4643 /* Parse list of up to eight 64-bit element tile names separated by commas in
4644 SME's ZERO instruction:
4645
4646 ZERO { <mask> }
4647
4648 Function returns <mask>:
4649
4650 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4651 */
4652 static int
parse_sme_zero_mask(char ** str)4653 parse_sme_zero_mask(char **str)
4654 {
4655 char *q;
4656 int mask;
4657 aarch64_opnd_qualifier_t qualifier;
4658 unsigned int ptr_flags = PTR_IN_REGLIST;
4659
4660 mask = 0x00;
4661 q = *str;
4662 do
4663 {
4664 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4665 &qualifier, ptr_flags);
4666 if (!reg)
4667 return PARSE_FAIL;
4668
4669 if (reg->type == REG_TYPE_ZA)
4670 {
4671 if (qualifier != AARCH64_OPND_QLF_NIL)
4672 {
4673 set_syntax_error ("ZA should not have a size suffix");
4674 return PARSE_FAIL;
4675 }
4676 /* { ZA } is assembled as all-ones immediate. */
4677 mask = 0xff;
4678 }
4679 else
4680 {
4681 int regno = reg->number;
4682 if (qualifier == AARCH64_OPND_QLF_S_B)
4683 {
4684 /* { ZA0.B } is assembled as all-ones immediate. */
4685 mask = 0xff;
4686 }
4687 else if (qualifier == AARCH64_OPND_QLF_S_H)
4688 mask |= 0x55 << regno;
4689 else if (qualifier == AARCH64_OPND_QLF_S_S)
4690 mask |= 0x11 << regno;
4691 else if (qualifier == AARCH64_OPND_QLF_S_D)
4692 mask |= 0x01 << regno;
4693 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4694 {
4695 set_syntax_error (_("ZA tile masks do not operate at .Q"
4696 " granularity"));
4697 return PARSE_FAIL;
4698 }
4699 else if (qualifier == AARCH64_OPND_QLF_NIL)
4700 {
4701 set_syntax_error (_("missing ZA tile size"));
4702 return PARSE_FAIL;
4703 }
4704 else
4705 {
4706 set_syntax_error (_("invalid ZA tile"));
4707 return PARSE_FAIL;
4708 }
4709 }
4710 ptr_flags |= PTR_GOOD_MATCH;
4711 }
4712 while (skip_past_char (&q, ','));
4713
4714 *str = q;
4715 return mask;
4716 }
4717
4718 /* Wraps in curly braces <mask> operand ZERO instruction:
4719
4720 ZERO { <mask> }
4721
4722 Function returns value of <mask> bit-field.
4723 */
4724 static int
parse_sme_list_of_64bit_tiles(char ** str)4725 parse_sme_list_of_64bit_tiles (char **str)
4726 {
4727 int regno;
4728
4729 if (!skip_past_char (str, '{'))
4730 {
4731 set_syntax_error (_("expected '{'"));
4732 return PARSE_FAIL;
4733 }
4734
4735 /* Empty <mask> list is an all-zeros immediate. */
4736 if (!skip_past_char (str, '}'))
4737 {
4738 regno = parse_sme_zero_mask (str);
4739 if (regno == PARSE_FAIL)
4740 return PARSE_FAIL;
4741
4742 if (!skip_past_char (str, '}'))
4743 {
4744 set_syntax_error (_("expected '}'"));
4745 return PARSE_FAIL;
4746 }
4747 }
4748 else
4749 regno = 0x00;
4750
4751 return regno;
4752 }
4753
4754 /* Parse streaming mode operand for SMSTART and SMSTOP.
4755
4756 {SM | ZA}
4757
4758 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4759 */
4760 static int
parse_sme_sm_za(char ** str)4761 parse_sme_sm_za (char **str)
4762 {
4763 char *p, *q;
4764
4765 p = q = *str;
4766 while (ISALPHA (*q))
4767 q++;
4768
4769 if ((q - p != 2)
4770 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4771 {
4772 set_syntax_error (_("expected SM or ZA operand"));
4773 return PARSE_FAIL;
4774 }
4775
4776 *str = q;
4777 return TOLOWER (p[0]);
4778 }
4779
4780 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4781 Returns the encoding for the option, or PARSE_FAIL.
4782
4783 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4784 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4785
4786 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4787 field, otherwise as a system register.
4788 */
4789
4790 static int
parse_sys_reg(char ** str,htab_t sys_regs,int imple_defined_p,int pstatefield_p,uint32_t * flags,bool sysreg128_p)4791 parse_sys_reg (char **str, htab_t sys_regs,
4792 int imple_defined_p, int pstatefield_p,
4793 uint32_t* flags, bool sysreg128_p)
4794 {
4795 char *p, *q;
4796 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4797 const aarch64_sys_reg *o;
4798 int value;
4799
4800 p = buf;
4801 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4802 if (p < buf + (sizeof (buf) - 1))
4803 *p++ = TOLOWER (*q);
4804 *p = '\0';
4805
4806 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4807 valid system register. This is enforced by construction of the hash
4808 table. */
4809 if (p - buf != q - *str)
4810 return PARSE_FAIL;
4811
4812 o = str_hash_find (sys_regs, buf);
4813 if (!o)
4814 {
4815 if (!imple_defined_p)
4816 return PARSE_FAIL;
4817 else
4818 {
4819 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4820 unsigned int op0, op1, cn, cm, op2;
4821
4822 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4823 != 5)
4824 return PARSE_FAIL;
4825 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4826 return PARSE_FAIL;
4827 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4828 if (flags)
4829 *flags = 0;
4830 }
4831 }
4832 else
4833 {
4834 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4835 as_bad (_("selected processor does not support PSTATE field "
4836 "name '%s'"), buf);
4837 if (!pstatefield_p
4838 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4839 o->flags, &o->features))
4840 as_bad (_("selected processor does not support system register "
4841 "name '%s'"), buf);
4842 if (sysreg128_p && !aarch64_sys_reg_128bit_p (o->flags))
4843 as_bad (_("128-bit-wide accsess not allowed on selected system"
4844 " register '%s'"), buf);
4845 if (aarch64_sys_reg_deprecated_p (o->flags))
4846 as_warn (_("system register name '%s' is deprecated and may be "
4847 "removed in a future release"), buf);
4848 value = o->value;
4849 if (flags)
4850 *flags = o->flags;
4851 }
4852
4853 *str = q;
4854 return value;
4855 }
4856
4857 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4858 for the option, or NULL. */
4859
4860 static const aarch64_sys_ins_reg *
parse_sys_ins_reg(char ** str,htab_t sys_ins_regs,bool sysreg128_p)4861 parse_sys_ins_reg (char **str, htab_t sys_ins_regs, bool sysreg128_p)
4862 {
4863 char *p, *q;
4864 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4865 const aarch64_sys_ins_reg *o;
4866
4867 p = buf;
4868 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4869 if (p < buf + (sizeof (buf) - 1))
4870 *p++ = TOLOWER (*q);
4871 *p = '\0';
4872
4873 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4874 valid system register. This is enforced by construction of the hash
4875 table. */
4876 if (p - buf != q - *str)
4877 return NULL;
4878
4879 o = str_hash_find (sys_ins_regs, buf);
4880 if (!o || (sysreg128_p && !aarch64_sys_reg_128bit_p (o->flags)))
4881 return NULL;
4882
4883 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4884 o->name, o->flags, &o->features))
4885 as_bad (_("selected processor does not support system register "
4886 "name '%s'"), buf);
4887 if (aarch64_sys_reg_deprecated_p (o->flags))
4888 as_warn (_("system register name '%s' is deprecated and may be "
4889 "removed in a future release"), buf);
4890
4891 *str = q;
4892 return o;
4893 }
4894
/* Convenience macros for the operand-parsing code later in this file.
   They operate on variables declared in the enclosing function (str, reg,
   info, val, imm_reg_type) and jump to its local "failure" label when a
   parse error occurs.  */

/* Consume the character CHR from str, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
    } while (0)

/* Parse a register of type REGTYPE into reg, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of REG_TYPE, recording its number and
   inherent qualifier into info, or fail with an expected-register error.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into val with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into val and require MIN <= val <= MAX,
   or fail with a fatal out-of-range error.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum token (or integer) from ARRAY into val, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Like po_enum_or_fail but never accepts a register-like integer.  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4949
/* A primitive log calculator: return floor(log2(N)) for N > 1,
   and 0 for N <= 1.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int result;

  for (result = 0; n > 1; n >>= 1)
    result++;

  return result;
}
4963
/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* The immediate occupies instruction bits [21:10].  */
  const unsigned int imm_shift = 10;
  return imm << imm_shift;
}
4970
/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The shift-amount field starts at instruction bit 22.  */
  const unsigned int sh_shift = 22;
  return cnt << sh_shift;
}
4977
/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  /* immlo: imm[1:0] -> insn[30:29]; immhi: imm[20:2] -> insn[23:5].  */
  uint32_t immlo = (imm & 0x3) << 29;
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;
  return immlo | immhi;
}
4986
/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* The 16-bit immediate occupies instruction bits [20:5].  */
  const unsigned int imm16_shift = 5;
  return imm << imm16_shift;
}
4993
/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* Keep the low 26 bits; the field sits at instruction bit 0.  */
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
5000
/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* Keep the low 19 bits and place them at instruction bits [23:5].  */
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
5007
/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* Keep the low 19 bits and place them at instruction bits [23:5].  */
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
5014
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* Keep the low 14 bits and place them at instruction bits [18:5].  */
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
5021
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* The immediate occupies instruction bits [20:5].  */
  const unsigned int imm16_shift = 5;
  return imm << imm16_shift;
}
5028
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 selects between the ADD and SUB forms; flip it.  */
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
5035
/* Force the MOVZ form by setting bit 30 of the MOVZ/MOVN opcode.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
5041
/* Force the MOVN form by clearing bit 30 of the MOVZ/MOVN opcode.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
5047
5048 /* Overall per-instruction processing. */
5049
5050 /* We need to be able to fix up arbitrary expressions in some statements.
5051 This is so that we can handle symbols that are an arbitrary distance from
5052 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5053 which returns part of an address in a form which will be valid for
5054 a data instruction. We do this by pushing the expression into a symbol
5055 in the expr_section, and creating a fix for that. */
5056
5057 static fixS *
fix_new_aarch64(fragS * frag,int where,short int size,expressionS * exp,int pc_rel,int reloc)5058 fix_new_aarch64 (fragS * frag,
5059 int where,
5060 short int size,
5061 expressionS * exp,
5062 int pc_rel,
5063 int reloc)
5064 {
5065 fixS *new_fix;
5066
5067 switch (exp->X_op)
5068 {
5069 case O_constant:
5070 case O_symbol:
5071 case O_add:
5072 case O_subtract:
5073 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5074 break;
5075
5076 default:
5077 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5078 pc_rel, reloc);
5079 break;
5080 }
5081 return new_fix;
5082 }
5083
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by enum aarch64_operand_error_kind, so the order here must match that
   enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5112
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being ordered by
     increasing severity; these asserts verify that assumption so that
     a reordering of the enum is caught here rather than by wrong
     diagnostics.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5139
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5168
5169 static void
reset_aarch64_instruction(aarch64_instruction * instruction)5170 reset_aarch64_instruction (aarch64_instruction *instruction)
5171 {
5172 memset (instruction, '\0', sizeof (aarch64_instruction));
5173 instruction->reloc.type = BFD_RELOC_UNUSED;
5174 }
5175
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error; there is at most one node per opcode
   template tried for the current line.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Next node in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand_error_record, tracked by head and tail.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines by
   init_operand_error_report.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5207
5208 /* Initialize the data structure that stores the operand mismatch
5209 information on assembling one line of the assembly code. */
5210 static void
init_operand_error_report(void)5211 init_operand_error_report (void)
5212 {
5213 if (operand_error_report.head != NULL)
5214 {
5215 gas_assert (operand_error_report.tail != NULL);
5216 operand_error_report.tail->next = free_opnd_error_record_nodes;
5217 free_opnd_error_record_nodes = operand_error_report.head;
5218 operand_error_report.head = NULL;
5219 operand_error_report.tail = NULL;
5220 return;
5221 }
5222 gas_assert (operand_error_report.tail == NULL);
5223 }
5224
5225 /* Return TRUE if some operand error has been recorded during the
5226 parsing of the current assembly line using the opcode *OPCODE;
5227 otherwise return FALSE. */
5228 static inline bool
opcode_has_operand_error_p(const aarch64_opcode * opcode)5229 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5230 {
5231 operand_error_record *record = operand_error_report.head;
5232 return record && record->opcode == opcode;
5233 }
5234
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* Records are inserted at the head, so if a record for OPCODE already
     exists it is the current head.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a free node if available, otherwise
	 allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a fresh record or one the new error supersedes.  */
  record->detail = new_record->detail;
}
5286
5287 static inline void
record_operand_error_info(const aarch64_opcode * opcode,aarch64_operand_error * error_info)5288 record_operand_error_info (const aarch64_opcode *opcode,
5289 aarch64_operand_error *error_info)
5290 {
5291 operand_error_record record;
5292 record.opcode = opcode;
5293 record.detail = *error_info;
5294 add_operand_error_record (&record);
5295 }
5296
5297 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5298 error message *ERROR, for operand IDX (count from 0). */
5299
5300 static void
record_operand_error(const aarch64_opcode * opcode,int idx,enum aarch64_operand_error_kind kind,const char * error)5301 record_operand_error (const aarch64_opcode *opcode, int idx,
5302 enum aarch64_operand_error_kind kind,
5303 const char* error)
5304 {
5305 aarch64_operand_error info;
5306 memset(&info, 0, sizeof (info));
5307 info.index = idx;
5308 info.kind = kind;
5309 info.error = error;
5310 info.non_fatal = false;
5311 record_operand_error_info (opcode, &info);
5312 }
5313
5314 static void
record_operand_error_with_data(const aarch64_opcode * opcode,int idx,enum aarch64_operand_error_kind kind,const char * error,const int * extra_data)5315 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5316 enum aarch64_operand_error_kind kind,
5317 const char* error, const int *extra_data)
5318 {
5319 aarch64_operand_error info;
5320 info.index = idx;
5321 info.kind = kind;
5322 info.error = error;
5323 info.data[0].i = extra_data[0];
5324 info.data[1].i = extra_data[1];
5325 info.data[2].i = extra_data[2];
5326 info.non_fatal = false;
5327 record_operand_error_info (opcode, &info);
5328 }
5329
5330 static void
record_operand_out_of_range_error(const aarch64_opcode * opcode,int idx,const char * error,int lower_bound,int upper_bound)5331 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5332 const char* error, int lower_bound,
5333 int upper_bound)
5334 {
5335 int data[3] = {lower_bound, upper_bound, 0};
5336 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5337 error, data);
5338 }
5339
5340 /* Remove the operand error record for *OPCODE. */
5341 static void ATTRIBUTE_UNUSED
remove_operand_error_record(const aarch64_opcode * opcode)5342 remove_operand_error_record (const aarch64_opcode *opcode)
5343 {
5344 if (opcode_has_operand_error_p (opcode))
5345 {
5346 operand_error_record* record = operand_error_report.head;
5347 gas_assert (record != NULL && operand_error_report.tail != NULL);
5348 operand_error_report.head = record->next;
5349 record->next = free_opnd_error_record_nodes;
5350 free_opnd_error_record_nodes = record;
5351 if (operand_error_report.head == NULL)
5352 {
5353 gas_assert (operand_error_report.tail == record);
5354 operand_error_report.tail = NULL;
5355 }
5356 }
5357 }
5358
5359 /* Given the instruction in *INSTR, return the index of the best matched
5360 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5361
5362 Return -1 if there is no qualifier sequence; return the first match
5363 if there is multiple matches found. */
5364
5365 static int
find_best_match(const aarch64_inst * instr,const aarch64_opnd_qualifier_seq_t * qualifiers_list)5366 find_best_match (const aarch64_inst *instr,
5367 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5368 {
5369 int i, num_opnds, max_num_matched, idx;
5370
5371 num_opnds = aarch64_num_of_operands (instr->opcode);
5372 if (num_opnds == 0)
5373 {
5374 DEBUG_TRACE ("no operand");
5375 return -1;
5376 }
5377
5378 max_num_matched = 0;
5379 idx = 0;
5380
5381 /* For each pattern. */
5382 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5383 {
5384 int j, num_matched;
5385 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5386
5387 /* Most opcodes has much fewer patterns in the list. */
5388 if (empty_qualifier_sequence_p (qualifiers))
5389 {
5390 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5391 break;
5392 }
5393
5394 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5395 if (*qualifiers == instr->operands[j].qualifier)
5396 ++num_matched;
5397
5398 if (num_matched > max_num_matched)
5399 {
5400 max_num_matched = num_matched;
5401 idx = i;
5402 }
5403 }
5404
5405 DEBUG_TRACE ("return with %d", idx);
5406 return idx;
5407 }
5408
5409 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5410 corresponding operands in *INSTR. */
5411
5412 static inline void
assign_qualifier_sequence(aarch64_inst * instr,const aarch64_opnd_qualifier_t * qualifiers)5413 assign_qualifier_sequence (aarch64_inst *instr,
5414 const aarch64_opnd_qualifier_t *qualifiers)
5415 {
5416 int i = 0;
5417 int num_opnds = aarch64_num_of_operands (instr->opcode);
5418 gas_assert (num_opnds);
5419 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5420 instr->operands[i].qualifier = *qualifiers;
5421 }
5422
5423 /* Callback used by aarch64_print_operand to apply STYLE to the
5424 disassembler output created from FMT and ARGS. The STYLER object holds
5425 any required state. Must return a pointer to a string (created from FMT
5426 and ARGS) that will continue to be valid until the complete disassembled
5427 instruction has been printed.
5428
5429 We don't currently add any styling to the output of the disassembler as
5430 used within assembler error messages, and so STYLE is ignored here. A
5431 new string is allocated on the obstack help within STYLER and returned
5432 to the caller. */
5433
static const char *aarch64_apply_style
  (struct aarch64_styler *styler,
   enum disassembler_style style ATTRIBUTE_UNUSED,
   const char *fmt, va_list args)
{
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* First pass: measure the formatted length without writing anything
     (vsnprintf with a NULL buffer and zero size).  */
  va_copy (ap, args);
  int len = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (len >= 0);

  /* Second pass: format into storage allocated on the caller's
     obstack, which stays live until the whole instruction has been
     printed.  */
  char *out = (char *) obstack_alloc (stack, len + 1);
  len = vsnprintf (out, len + 1, fmt, args);
  gas_assert (len >= 0);

  return out;
}
5457
5458 /* Print operands for the diagnosis purpose. */
5459
5460 static void
print_operands(char * buf,const aarch64_opcode * opcode,const aarch64_opnd_info * opnds)5461 print_operands (char *buf, const aarch64_opcode *opcode,
5462 const aarch64_opnd_info *opnds)
5463 {
5464 int i;
5465 struct aarch64_styler styler;
5466 struct obstack content;
5467 obstack_init (&content);
5468
5469 styler.apply_style = aarch64_apply_style;
5470 styler.state = (void *) &content;
5471
5472 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5473 {
5474 char str[128];
5475 char cmt[128];
5476
5477 /* We regard the opcode operand info more, however we also look into
5478 the inst->operands to support the disassembling of the optional
5479 operand.
5480 The two operand code should be the same in all cases, apart from
5481 when the operand can be optional. */
5482 if (opcode->operands[i] == AARCH64_OPND_NIL
5483 || opnds[i].type == AARCH64_OPND_NIL)
5484 break;
5485
5486 /* Generate the operand string in STR. */
5487 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5488 NULL, cmt, sizeof (cmt), cpu_variant, &styler);
5489
5490 /* Delimiter. */
5491 if (str[0] != '\0')
5492 strcat (buf, i == 0 ? " " : ", ");
5493
5494 /* Append the operand string. */
5495 strcat (buf, str);
5496
5497 /* Append a comment. This works because only the last operand ever
5498 adds a comment. If that ever changes then we'll need to be
5499 smarter here. */
5500 if (cmt[0] != '\0')
5501 {
5502 strcat (buf, "\t// ");
5503 strcat (buf, cmt);
5504 }
5505 }
5506
5507 obstack_free (&content, NULL);
5508 }
5509
5510 /* Send to stderr a string as information. */
5511
static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  /* Prefix with "file:line: " when both are known, "file: " otherwise.  */
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5533
5534 /* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
5535 relates to registers or register lists. If so, return a string that
5536 reports the error against "operand %d", otherwise return null. */
5537
static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* NOTE(review): from the tests below, the data[] payload appears to
     be used as follows -- data[0].i: flags for errors seen outside a
     register list (SEF_DEFAULT_ERROR marking a generic syntax error);
     data[1].i: flags for errors seen inside a register list, with
     SEF_IN_REGLIST set once a '{' has been parsed; data[2].i: flags
     describing a register that was actually found.  Confirm against
     the code that records AARCH64_OPDE_SYNTAX_ERROR.  */

  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5591
5592 /* Output one operand error record. */
5593
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* IDX may be negative, meaning the error is not tied to one operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal problems are reported as warnings, everything else as
     hard errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Try to produce a more precise register-related message before
	 falling back to the generic handling below.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0].i/data[1].i carry the inclusive lower/upper bounds; see
	 record_operand_out_of_range_error.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      /* NOTE(review): data[0].i appears to be a bitmask of acceptable
	 list lengths (bit N set => length N allowed; 0x14 => 2 or 4)
	 -- confirm against the recording site.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else if (detail->data[0].i == 0x14)
	handler (_("expected a list of %d or %d registers at"
		   " operand %d -- `%s'"),
		 2, 4, idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      /* NOTE(review): same bitmask convention as above, but for strides
	 (0x12 => 1 or 4, 0x102 => 1 or 8) -- confirm.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
	handler (_("the register list must have a stride of %d or %d"
		   " at operand %d -- `%s`"), 1,
		 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5834
5835 /* Return true if the presence of error A against an instruction means
5836 that error B should not be reported. This is only used as a first pass,
5837 to pick the kind of error that we should report. */
5838
5839 static bool
better_error_p(operand_error_record * a,operand_error_record * b)5840 better_error_p (operand_error_record *a, operand_error_record *b)
5841 {
5842 /* For errors reported during parsing, prefer errors that relate to
5843 later operands, since that implies that the earlier operands were
5844 syntactically valid.
5845
5846 For example, if we see a register R instead of an immediate in
5847 operand N, we'll report that as a recoverable "immediate operand
5848 required" error. This is because there is often another opcode
5849 entry that accepts a register operand N, and any errors about R
5850 should be reported against the register forms of the instruction.
5851 But if no such register form exists, the recoverable error should
5852 still win over a syntax error against operand N-1.
5853
5854 For these purposes, count an error reported at the end of the
5855 assembly string as equivalent to an error reported against the
5856 final operand. This means that opcode entries that expect more
5857 operands win over "unexpected characters following instruction". */
5858 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5859 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5860 {
5861 int a_index = (a->detail.index < 0
5862 ? aarch64_num_of_operands (a->opcode) - 1
5863 : a->detail.index);
5864 int b_index = (b->detail.index < 0
5865 ? aarch64_num_of_operands (b->opcode) - 1
5866 : b->detail.index);
5867 if (a_index != b_index)
5868 return a_index > b_index;
5869 }
5870 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5871 }
5872
5873 /* Process and output the error message about the operand mismatching.
5874
5875 When this function is called, the operand error information had
5876 been collected for an assembly line and there will be multiple
5877 errors in the case of multiple instruction templates; output the
5878 error message that most closely describes the problem.
5879
5880 The errors to be printed can be filtered on printing all errors
5881 or only non-fatal errors. This distinction has to be made because
5882 the error buffer may already be filled with fatal errors we don't want to
5883 print due to the different instruction templates. */
5884
static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity (as judged
     by better_error_p) among all collected records.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the flag words of same-index syntax errors so the
		 final message reflects all acceptable alternatives.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise merge the acceptable-length/stride masks.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6008
6009 /* Write an AARCH64 instruction to buf - always little-endian. */
/* Store the 32-bit encoding INSN at BUF.  AArch64 instructions are
   always little-endian in memory, regardless of data endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  for (int i = 0; i < 4; i++)
    where[i] = (insn >> (8 * i)) & 0xff;
}
6019
/* Read back a 32-bit instruction stored little-endian at BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result = 0;
  for (int i = 3; i >= 0; i--)
    result = (result << 8) | where[i];
  return result;
}
6029
6030 static void
output_inst(struct aarch64_inst * new_inst)6031 output_inst (struct aarch64_inst *new_inst)
6032 {
6033 char *to = NULL;
6034
6035 to = frag_more (INSN_SIZE);
6036
6037 frag_now->tc_frag_data.recorded = 1;
6038
6039 put_aarch64_insn (to, inst.base.value);
6040
6041 if (inst.reloc.type != BFD_RELOC_UNUSED)
6042 {
6043 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
6044 INSN_SIZE, &inst.reloc.exp,
6045 inst.reloc.pc_rel,
6046 inst.reloc.type);
6047 DEBUG_TRACE ("Prepared relocation fix up");
6048 /* Don't check the addend value against the instruction size,
6049 that's the job of our code in md_apply_fix(). */
6050 fixp->fx_no_overflow = 1;
6051 if (new_inst != NULL)
6052 fixp->tc_fix_data.inst = new_inst;
6053 if (aarch64_gas_internal_fixup_p ())
6054 {
6055 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
6056 fixp->tc_fix_data.opnd = inst.reloc.opnd;
6057 fixp->fx_addnumber = inst.reloc.flags;
6058 }
6059 }
6060
6061 dwarf2_emit_insn (INSN_SIZE);
6062 }
6063
6064 /* Link together opcodes of the same name. */
6065
struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
6073
6074 static templates *
lookup_mnemonic(const char * start,int len)6075 lookup_mnemonic (const char *start, int len)
6076 {
6077 templates *templ = NULL;
6078
6079 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6080 return templ;
6081 }
6082
6083 /* Subroutine of md_assemble, responsible for looking up the primary
6084 opcode from the mnemonic the user wrote. BASE points to the beginning
6085 of the mnemonic, DOT points to the first '.' within the mnemonic
6086 (if any) and END points to the end of the mnemonic. */
6087
6088 static templates *
opcode_lookup(char * base,char * dot,char * end)6089 opcode_lookup (char *base, char *dot, char *end)
6090 {
6091 const aarch64_cond *cond;
6092 char condname[16];
6093 int len;
6094
6095 if (dot == end)
6096 return 0;
6097
6098 inst.cond = COND_ALWAYS;
6099
6100 /* Handle a possible condition. */
6101 if (dot)
6102 {
6103 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6104 if (!cond)
6105 return 0;
6106 inst.cond = cond->value;
6107 len = dot - base;
6108 }
6109 else
6110 len = end - base;
6111
6112 if (inst.cond == COND_ALWAYS)
6113 {
6114 /* Look for unaffixed mnemonic. */
6115 return lookup_mnemonic (base, len);
6116 }
6117 else if (len <= 13)
6118 {
6119 /* append ".c" to mnemonic if conditional */
6120 memcpy (condname, base, len);
6121 memcpy (condname + len, ".c", 2);
6122 base = condname;
6123 len += 2;
6124 return lookup_mnemonic (base, len);
6125 }
6126
6127 return NULL;
6128 }
6129
6130 /* Process an optional operand that is found omitted from the assembly line.
6131 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
6132 instruction's opcode entry while IDX is the index of this omitted operand.
6133 */
6134
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the omitted register takes the opcode's
       default register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;
    /* Second register of a pair: if the previous register is XZR (31),
       the pair register is also XZR; otherwise it is the next register.  */
    case AARCH64_OPND_PAIRREG_OR_XZR:
      if (inst.base.operands[idx - 1].reg.regno == 0x1f)
	{
	  operand->reg.regno = 0x1f;
	  break;
	}
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;
    /* Second register of a pair: always previous register + 1.  */
    case AARCH64_OPND_PAIRREG:
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;

    /* Vector element operands take the default lane/register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands take the default immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled SVE pattern: default pattern with an implicit "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier and hint operands index into their option tables.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6239
/* Process the relocation type for move wide instructions.

   Check that the relocation recorded in inst.reloc is compatible with
   the MOV[ZNK] instruction being assembled, and derive from it the
   implicit LSL shift amount (0, 16, 32 or 48) applied to operand 1.

   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W-register destination restricts which 16-bit groups are usable
     (only G0 and G1; see the is32 checks below).  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed-range ("_S"), PC-relative ("PREL") and several TLS
     relocation variants are not permitted on MOVK; reject them up
     front.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation to the 16-bit group it selects, and hence the
     implicit shift: G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  The G2 and
     G3 groups only exist for 64-bit destinations.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  /* Record the implicit shift on the immediate operand.  */
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6341
6342 /* Determine and return the real reloc type code for an instruction
6343 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6344
6345 static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type(void)6346 ldst_lo12_determine_real_reloc_type (void)
6347 {
6348 unsigned logsz, max_logsz;
6349 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
6350 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
6351
6352 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
6353 {
6354 BFD_RELOC_AARCH64_LDST8_LO12,
6355 BFD_RELOC_AARCH64_LDST16_LO12,
6356 BFD_RELOC_AARCH64_LDST32_LO12,
6357 BFD_RELOC_AARCH64_LDST64_LO12,
6358 BFD_RELOC_AARCH64_LDST128_LO12
6359 },
6360 {
6361 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
6362 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
6363 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
6364 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
6365 BFD_RELOC_AARCH64_NONE
6366 },
6367 {
6368 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
6369 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
6370 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
6371 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
6372 BFD_RELOC_AARCH64_NONE
6373 },
6374 {
6375 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
6376 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
6377 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
6378 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
6379 BFD_RELOC_AARCH64_NONE
6380 },
6381 {
6382 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
6383 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
6384 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
6385 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
6386 BFD_RELOC_AARCH64_NONE
6387 }
6388 };
6389
6390 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6391 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6392 || (inst.reloc.type
6393 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6394 || (inst.reloc.type
6395 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6396 || (inst.reloc.type
6397 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
6398 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
6399
6400 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
6401 opd1_qlf =
6402 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
6403 1, opd0_qlf, 0);
6404 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
6405
6406 logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));
6407
6408 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6409 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
6410 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
6411 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
6412 max_logsz = 3;
6413 else
6414 max_logsz = 4;
6415
6416 if (logsz > max_logsz)
6417 {
6418 /* SEE PR 27904 for an example of this. */
6419 set_fatal_syntax_error
6420 (_("relocation qualifier does not match instruction size"));
6421 return BFD_RELOC_AARCH64_NONE;
6422 }
6423
6424 /* In reloc.c, these pseudo relocation types should be defined in similar
6425 order as above reloc_ldst_lo12 array. Because the array index calculation
6426 below relies on this. */
6427 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
6428 }
6429
6430 /* Check whether a register list REGINFO is valid. The registers have type
6431 REG_TYPE and must be numbered in increasing order (modulo the register
6432 bank size). They must have a consistent stride.
6433
6434 Return true if the list is valid, describing it in LIST if so. */
6435
6436 static bool
reg_list_valid_p(uint32_t reginfo,struct aarch64_reglist * list,aarch64_reg_type reg_type)6437 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6438 aarch64_reg_type reg_type)
6439 {
6440 uint32_t i, nb_regs, prev_regno, incr, mask;
6441 mask = reg_type_mask (reg_type);
6442
6443 nb_regs = 1 + (reginfo & 0x3);
6444 reginfo >>= 2;
6445 prev_regno = reginfo & 0x1f;
6446 incr = 1;
6447
6448 list->first_regno = prev_regno;
6449 list->num_regs = nb_regs;
6450
6451 for (i = 1; i < nb_regs; ++i)
6452 {
6453 uint32_t curr_regno, curr_incr;
6454 reginfo >>= 5;
6455 curr_regno = reginfo & 0x1f;
6456 curr_incr = (curr_regno - prev_regno) & mask;
6457 if (curr_incr == 0)
6458 return false;
6459 else if (i == 1)
6460 incr = curr_incr;
6461 else if (curr_incr != incr)
6462 return false;
6463 prev_regno = curr_regno;
6464 }
6465
6466 list->stride = incr;
6467 return true;
6468 }
6469
6470 /* Generic instruction operand parser. This does no encoding and no
6471 semantic validation; it merely squirrels values away in the inst
6472 structure. Returns TRUE or FALSE depending on whether the
6473 specified grammar matched. */
6474
6475 static bool
parse_operands(char * str,const aarch64_opcode * opcode)6476 parse_operands (char *str, const aarch64_opcode *opcode)
6477 {
6478 int i;
6479 char *backtrack_pos = 0;
6480 const enum aarch64_opnd *operands = opcode->operands;
6481 const uint64_t flags = opcode->flags;
6482 aarch64_reg_type imm_reg_type;
6483
6484 clear_error ();
6485 skip_whitespace (str);
6486
6487 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
6488 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6489 else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
6490 || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
6491 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6492 else
6493 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6494
6495 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6496 {
6497 int64_t val;
6498 const reg_entry *reg;
6499 int comma_skipped_p = 0;
6500 struct vector_type_el vectype;
6501 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6502 aarch64_opnd_info *info = &inst.base.operands[i];
6503 aarch64_reg_type reg_type;
6504
6505 DEBUG_TRACE ("parse operand %d", i);
6506
6507 /* Assign the operand code. */
6508 info->type = operands[i];
6509
6510 if (optional_operand_p (opcode, i))
6511 {
6512 /* Remember where we are in case we need to backtrack. */
6513 gas_assert (!backtrack_pos);
6514 backtrack_pos = str;
6515 }
6516
6517 /* Expect comma between operands; the backtrack mechanism will take
6518 care of cases of omitted optional operand. */
6519 if (i > 0 && ! skip_past_char (&str, ','))
6520 {
6521 set_syntax_error (_("comma expected between operands"));
6522 goto failure;
6523 }
6524 else
6525 comma_skipped_p = 1;
6526
6527 switch (operands[i])
6528 {
6529 case AARCH64_OPND_Rd:
6530 case AARCH64_OPND_Rn:
6531 case AARCH64_OPND_Rm:
6532 case AARCH64_OPND_Rt:
6533 case AARCH64_OPND_Rt2:
6534 case AARCH64_OPND_X16:
6535 case AARCH64_OPND_Rs:
6536 case AARCH64_OPND_Ra:
6537 case AARCH64_OPND_Rt_LS64:
6538 case AARCH64_OPND_Rt_SYS:
6539 case AARCH64_OPND_PAIRREG:
6540 case AARCH64_OPND_PAIRREG_OR_XZR:
6541 case AARCH64_OPND_SVE_Rm:
6542 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6543
6544 /* In LS64 load/store instructions Rt register number must be even
6545 and <=22. */
6546 if (operands[i] == AARCH64_OPND_Rt_LS64)
6547 {
6548 /* We've already checked if this is valid register.
6549 This will check if register number (Rt) is not undefined for
6550 LS64 instructions:
6551 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6552 if ((info->reg.regno & 0x18) == 0x18
6553 || (info->reg.regno & 0x01) == 0x01)
6554 {
6555 set_syntax_error
6556 (_("invalid Rt register number in 64-byte load/store"));
6557 goto failure;
6558 }
6559 }
6560 else if (operands[i] == AARCH64_OPND_X16)
6561 {
6562 if (info->reg.regno != 16)
6563 {
6564 goto failure;
6565 }
6566 }
6567 break;
6568
6569 case AARCH64_OPND_Rd_SP:
6570 case AARCH64_OPND_Rn_SP:
6571 case AARCH64_OPND_Rt_SP:
6572 case AARCH64_OPND_SVE_Rn_SP:
6573 case AARCH64_OPND_Rm_SP:
6574 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6575 break;
6576
6577 case AARCH64_OPND_Rm_EXT:
6578 case AARCH64_OPND_Rm_SFT:
6579 po_misc_or_fail (parse_shifter_operand
6580 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6581 ? SHIFTED_ARITH_IMM
6582 : SHIFTED_LOGIC_IMM)));
6583 if (!info->shifter.operator_present)
6584 {
6585 /* Default to LSL if not present. Libopcodes prefers shifter
6586 kind to be explicit. */
6587 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6588 info->shifter.kind = AARCH64_MOD_LSL;
6589 /* For Rm_EXT, libopcodes will carry out further check on whether
6590 or not stack pointer is used in the instruction (Recall that
6591 "the extend operator is not optional unless at least one of
6592 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6593 }
6594 break;
6595
6596 case AARCH64_OPND_Fd:
6597 case AARCH64_OPND_Fn:
6598 case AARCH64_OPND_Fm:
6599 case AARCH64_OPND_Fa:
6600 case AARCH64_OPND_Ft:
6601 case AARCH64_OPND_Ft2:
6602 case AARCH64_OPND_Sd:
6603 case AARCH64_OPND_Sn:
6604 case AARCH64_OPND_Sm:
6605 case AARCH64_OPND_SVE_VZn:
6606 case AARCH64_OPND_SVE_Vd:
6607 case AARCH64_OPND_SVE_Vm:
6608 case AARCH64_OPND_SVE_Vn:
6609 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6610 break;
6611
6612 case AARCH64_OPND_SVE_Pd:
6613 case AARCH64_OPND_SVE_Pg3:
6614 case AARCH64_OPND_SVE_Pg4_5:
6615 case AARCH64_OPND_SVE_Pg4_10:
6616 case AARCH64_OPND_SVE_Pg4_16:
6617 case AARCH64_OPND_SVE_Pm:
6618 case AARCH64_OPND_SVE_Pn:
6619 case AARCH64_OPND_SVE_Pt:
6620 case AARCH64_OPND_SME_Pm:
6621 reg_type = REG_TYPE_P;
6622 goto vector_reg;
6623
6624 case AARCH64_OPND_SVE_Za_5:
6625 case AARCH64_OPND_SVE_Za_16:
6626 case AARCH64_OPND_SVE_Zd:
6627 case AARCH64_OPND_SVE_Zm_5:
6628 case AARCH64_OPND_SVE_Zm_16:
6629 case AARCH64_OPND_SVE_Zn:
6630 case AARCH64_OPND_SVE_Zt:
6631 case AARCH64_OPND_SME_Zm:
6632 reg_type = REG_TYPE_Z;
6633 goto vector_reg;
6634
6635 case AARCH64_OPND_SVE_PNd:
6636 case AARCH64_OPND_SVE_PNg4_10:
6637 case AARCH64_OPND_SVE_PNn:
6638 case AARCH64_OPND_SVE_PNt:
6639 case AARCH64_OPND_SME_PNd3:
6640 case AARCH64_OPND_SME_PNg3:
6641 case AARCH64_OPND_SME_PNn:
6642 reg_type = REG_TYPE_PN;
6643 goto vector_reg;
6644
6645 case AARCH64_OPND_Va:
6646 case AARCH64_OPND_Vd:
6647 case AARCH64_OPND_Vn:
6648 case AARCH64_OPND_Vm:
6649 reg_type = REG_TYPE_V;
6650 vector_reg:
6651 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6652 if (!reg)
6653 goto failure;
6654 if (vectype.defined & NTA_HASINDEX)
6655 goto failure;
6656
6657 info->reg.regno = reg->number;
6658 if ((reg_type == REG_TYPE_P
6659 || reg_type == REG_TYPE_PN
6660 || reg_type == REG_TYPE_Z)
6661 && vectype.type == NT_invtype)
6662 /* Unqualified P and Z registers are allowed in certain
6663 contexts. Rely on F_STRICT qualifier checking to catch
6664 invalid uses. */
6665 info->qualifier = AARCH64_OPND_QLF_NIL;
6666 else
6667 {
6668 info->qualifier = vectype_to_qualifier (&vectype);
6669 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6670 goto failure;
6671 }
6672 break;
6673
6674 case AARCH64_OPND_VdD1:
6675 case AARCH64_OPND_VnD1:
6676 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6677 if (!reg)
6678 goto failure;
6679 if (vectype.type != NT_d || vectype.index != 1)
6680 {
6681 set_fatal_syntax_error
6682 (_("the top half of a 128-bit FP/SIMD register is expected"));
6683 goto failure;
6684 }
6685 info->reg.regno = reg->number;
6686 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6687 here; it is correct for the purpose of encoding/decoding since
6688 only the register number is explicitly encoded in the related
6689 instructions, although this appears a bit hacky. */
6690 info->qualifier = AARCH64_OPND_QLF_S_D;
6691 break;
6692
6693 case AARCH64_OPND_SVE_Zm3_INDEX:
6694 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6695 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6696 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6697 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6698 case AARCH64_OPND_SVE_Zm4_INDEX:
6699 case AARCH64_OPND_SVE_Zn_INDEX:
6700 case AARCH64_OPND_SVE_Zm_imm4:
6701 case AARCH64_OPND_SVE_Zn_5_INDEX:
6702 case AARCH64_OPND_SME_Zm_INDEX1:
6703 case AARCH64_OPND_SME_Zm_INDEX2:
6704 case AARCH64_OPND_SME_Zm_INDEX3_1:
6705 case AARCH64_OPND_SME_Zm_INDEX3_2:
6706 case AARCH64_OPND_SME_Zm_INDEX3_10:
6707 case AARCH64_OPND_SME_Zm_INDEX4_1:
6708 case AARCH64_OPND_SME_Zm_INDEX4_10:
6709 case AARCH64_OPND_SME_Zn_INDEX1_16:
6710 case AARCH64_OPND_SME_Zn_INDEX2_15:
6711 case AARCH64_OPND_SME_Zn_INDEX2_16:
6712 case AARCH64_OPND_SME_Zn_INDEX3_14:
6713 case AARCH64_OPND_SME_Zn_INDEX3_15:
6714 case AARCH64_OPND_SME_Zn_INDEX4_14:
6715 reg_type = REG_TYPE_Z;
6716 goto vector_reg_index;
6717
6718 case AARCH64_OPND_Ed:
6719 case AARCH64_OPND_En:
6720 case AARCH64_OPND_Em:
6721 case AARCH64_OPND_Em16:
6722 case AARCH64_OPND_SM3_IMM2:
6723 reg_type = REG_TYPE_V;
6724 vector_reg_index:
6725 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6726 if (!reg)
6727 goto failure;
6728 if (!(vectype.defined & NTA_HASINDEX))
6729 goto failure;
6730
6731 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6732 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6733 info->qualifier = AARCH64_OPND_QLF_NIL;
6734 else
6735 {
6736 if (vectype.type == NT_invtype)
6737 goto failure;
6738 info->qualifier = vectype_to_qualifier (&vectype);
6739 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6740 goto failure;
6741 }
6742
6743 info->reglane.regno = reg->number;
6744 info->reglane.index = vectype.index;
6745 break;
6746
6747 case AARCH64_OPND_SVE_ZnxN:
6748 case AARCH64_OPND_SVE_ZtxN:
6749 case AARCH64_OPND_SME_Zdnx2:
6750 case AARCH64_OPND_SME_Zdnx4:
6751 case AARCH64_OPND_SME_Zt2:
6752 case AARCH64_OPND_SME_Zt3:
6753 case AARCH64_OPND_SME_Zt4:
6754 case AARCH64_OPND_SME_Zmx2:
6755 case AARCH64_OPND_SME_Zmx4:
6756 case AARCH64_OPND_SME_Znx2:
6757 case AARCH64_OPND_SME_Znx4:
6758 case AARCH64_OPND_SME_Ztx2_STRIDED:
6759 case AARCH64_OPND_SME_Ztx4_STRIDED:
6760 reg_type = REG_TYPE_Z;
6761 goto vector_reg_list;
6762
6763 case AARCH64_OPND_SME_Pdx2:
6764 case AARCH64_OPND_SME_PdxN:
6765 reg_type = REG_TYPE_P;
6766 goto vector_reg_list;
6767
6768 case AARCH64_OPND_LVn:
6769 case AARCH64_OPND_LVt:
6770 case AARCH64_OPND_LVt_AL:
6771 case AARCH64_OPND_LEt:
6772 reg_type = REG_TYPE_V;
6773 vector_reg_list:
6774 if (reg_type == REG_TYPE_Z
6775 && get_opcode_dependent_value (opcode) == 1
6776 && *str != '{')
6777 {
6778 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6779 if (!reg)
6780 goto failure;
6781 info->reglist.first_regno = reg->number;
6782 info->reglist.num_regs = 1;
6783 info->reglist.stride = 1;
6784 }
6785 else
6786 {
6787 val = parse_vector_reg_list (&str, reg_type, &vectype);
6788 if (val == PARSE_FAIL)
6789 goto failure;
6790
6791 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6792 {
6793 set_fatal_syntax_error (_("invalid register list"));
6794 goto failure;
6795 }
6796
6797 if ((int) vectype.width > 0 && *str != ',')
6798 {
6799 set_fatal_syntax_error
6800 (_("expected element type rather than vector type"));
6801 goto failure;
6802 }
6803 }
6804 if (operands[i] == AARCH64_OPND_LEt)
6805 {
6806 if (!(vectype.defined & NTA_HASINDEX))
6807 goto failure;
6808 info->reglist.has_index = 1;
6809 info->reglist.index = vectype.index;
6810 }
6811 else
6812 {
6813 if (vectype.defined & NTA_HASINDEX)
6814 goto failure;
6815 if (!(vectype.defined & NTA_HASTYPE))
6816 {
6817 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6818 set_fatal_syntax_error (_("missing type suffix"));
6819 goto failure;
6820 }
6821 }
6822 info->qualifier = vectype_to_qualifier (&vectype);
6823 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6824 goto failure;
6825 break;
6826
6827 case AARCH64_OPND_CRn:
6828 case AARCH64_OPND_CRm:
6829 {
6830 char prefix = *(str++);
6831 if (prefix != 'c' && prefix != 'C')
6832 goto failure;
6833
6834 po_imm_nc_or_fail ();
6835 if (flags & F_OPD_NARROW)
6836 {
6837 if ((operands[i] == AARCH64_OPND_CRn)
6838 && (val < 8 || val > 9))
6839 {
6840 set_fatal_syntax_error (_(N_ ("C8 - C9 expected")));
6841 goto failure;
6842 }
6843 else if ((operands[i] == AARCH64_OPND_CRm)
6844 && (val > 7))
6845 {
6846 set_fatal_syntax_error (_(N_ ("C0 - C7 expected")));
6847 goto failure;
6848 }
6849 }
6850 else if (val > 15)
6851 {
6852 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6853 goto failure;
6854 }
6855 info->qualifier = AARCH64_OPND_QLF_CR;
6856 info->imm.value = val;
6857 break;
6858 }
6859
6860 case AARCH64_OPND_SHLL_IMM:
6861 case AARCH64_OPND_IMM_VLSR:
6862 po_imm_or_fail (1, 64);
6863 info->imm.value = val;
6864 break;
6865
6866 case AARCH64_OPND_CCMP_IMM:
6867 case AARCH64_OPND_SIMM5:
6868 case AARCH64_OPND_FBITS:
6869 case AARCH64_OPND_TME_UIMM16:
6870 case AARCH64_OPND_UIMM4:
6871 case AARCH64_OPND_UIMM4_ADDG:
6872 case AARCH64_OPND_UIMM10:
6873 case AARCH64_OPND_UIMM3_OP1:
6874 case AARCH64_OPND_UIMM3_OP2:
6875 case AARCH64_OPND_IMM_VLSL:
6876 case AARCH64_OPND_IMM:
6877 case AARCH64_OPND_IMM_2:
6878 case AARCH64_OPND_WIDTH:
6879 case AARCH64_OPND_SVE_INV_LIMM:
6880 case AARCH64_OPND_SVE_LIMM:
6881 case AARCH64_OPND_SVE_LIMM_MOV:
6882 case AARCH64_OPND_SVE_SHLIMM_PRED:
6883 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6884 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6885 case AARCH64_OPND_SME_SHRIMM4:
6886 case AARCH64_OPND_SME_SHRIMM5:
6887 case AARCH64_OPND_SVE_SHRIMM_PRED:
6888 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6889 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6890 case AARCH64_OPND_SVE_SIMM5:
6891 case AARCH64_OPND_SVE_SIMM5B:
6892 case AARCH64_OPND_SVE_SIMM6:
6893 case AARCH64_OPND_SVE_SIMM8:
6894 case AARCH64_OPND_SVE_UIMM3:
6895 case AARCH64_OPND_SVE_UIMM7:
6896 case AARCH64_OPND_SVE_UIMM8:
6897 case AARCH64_OPND_SVE_UIMM8_53:
6898 case AARCH64_OPND_IMM_ROT1:
6899 case AARCH64_OPND_IMM_ROT2:
6900 case AARCH64_OPND_IMM_ROT3:
6901 case AARCH64_OPND_SVE_IMM_ROT1:
6902 case AARCH64_OPND_SVE_IMM_ROT2:
6903 case AARCH64_OPND_SVE_IMM_ROT3:
6904 case AARCH64_OPND_CSSC_SIMM8:
6905 case AARCH64_OPND_CSSC_UIMM8:
6906 po_imm_nc_or_fail ();
6907 info->imm.value = val;
6908 break;
6909
6910 case AARCH64_OPND_SVE_AIMM:
6911 case AARCH64_OPND_SVE_ASIMM:
6912 po_imm_nc_or_fail ();
6913 info->imm.value = val;
6914 skip_whitespace (str);
6915 if (skip_past_comma (&str))
6916 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6917 else
6918 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6919 break;
6920
6921 case AARCH64_OPND_SVE_PATTERN:
6922 po_enum_or_fail (aarch64_sve_pattern_array);
6923 info->imm.value = val;
6924 break;
6925
6926 case AARCH64_OPND_SVE_PATTERN_SCALED:
6927 po_enum_or_fail (aarch64_sve_pattern_array);
6928 info->imm.value = val;
6929 if (skip_past_comma (&str)
6930 && !parse_shift (&str, info, SHIFTED_MUL))
6931 goto failure;
6932 if (!info->shifter.operator_present)
6933 {
6934 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6935 info->shifter.kind = AARCH64_MOD_MUL;
6936 info->shifter.amount = 1;
6937 }
6938 break;
6939
6940 case AARCH64_OPND_SVE_PRFOP:
6941 po_enum_or_fail (aarch64_sve_prfop_array);
6942 info->imm.value = val;
6943 break;
6944
6945 case AARCH64_OPND_UIMM7:
6946 po_imm_or_fail (0, 127);
6947 info->imm.value = val;
6948 break;
6949
6950 case AARCH64_OPND_IDX:
6951 case AARCH64_OPND_MASK:
6952 case AARCH64_OPND_BIT_NUM:
6953 case AARCH64_OPND_IMMR:
6954 case AARCH64_OPND_IMMS:
6955 po_imm_or_fail (0, 63);
6956 info->imm.value = val;
6957 break;
6958
6959 case AARCH64_OPND_IMM0:
6960 po_imm_nc_or_fail ();
6961 if (val != 0)
6962 {
6963 set_fatal_syntax_error (_("immediate zero expected"));
6964 goto failure;
6965 }
6966 info->imm.value = 0;
6967 break;
6968
6969 case AARCH64_OPND_FPIMM0:
6970 {
6971 int qfloat;
6972 bool res1 = false, res2 = false;
6973 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6974 it is probably not worth the effort to support it. */
6975 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6976 imm_reg_type))
6977 && (error_p ()
6978 || !(res2 = parse_constant_immediate (&str, &val,
6979 imm_reg_type))))
6980 goto failure;
6981 if ((res1 && qfloat == 0) || (res2 && val == 0))
6982 {
6983 info->imm.value = 0;
6984 info->imm.is_fp = 1;
6985 break;
6986 }
6987 set_fatal_syntax_error (_("immediate zero expected"));
6988 goto failure;
6989 }
6990
6991 case AARCH64_OPND_IMM_MOV:
6992 {
6993 char *saved = str;
6994 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6995 || reg_name_p (str, REG_TYPE_V))
6996 goto failure;
6997 str = saved;
6998 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6999 GE_OPT_PREFIX, REJECT_ABSENT));
7000 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
7001 later. fix_mov_imm_insn will try to determine a machine
7002 instruction (MOVZ, MOVN or ORR) for it and will issue an error
7003 message if the immediate cannot be moved by a single
7004 instruction. */
7005 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7006 inst.base.operands[i].skip = 1;
7007 }
7008 break;
7009
7010 case AARCH64_OPND_SIMD_IMM:
7011 case AARCH64_OPND_SIMD_IMM_SFT:
7012 if (! parse_big_immediate (&str, &val, imm_reg_type))
7013 goto failure;
7014 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7015 /* addr_off_p */ 0,
7016 /* need_libopcodes_p */ 1,
7017 /* skip_p */ 1);
7018 /* Parse shift.
7019 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7020 shift, we don't check it here; we leave the checking to
7021 the libopcodes (operand_general_constraint_met_p). By
7022 doing this, we achieve better diagnostics. */
7023 if (skip_past_comma (&str)
7024 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7025 goto failure;
7026 if (!info->shifter.operator_present
7027 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7028 {
7029 /* Default to LSL if not present. Libopcodes prefers shifter
7030 kind to be explicit. */
7031 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7032 info->shifter.kind = AARCH64_MOD_LSL;
7033 }
7034 break;
7035
7036 case AARCH64_OPND_FPIMM:
7037 case AARCH64_OPND_SIMD_FPIMM:
7038 case AARCH64_OPND_SVE_FPIMM8:
7039 {
7040 int qfloat;
7041 bool dp_p;
7042
7043 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7044 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7045 || !aarch64_imm_float_p (qfloat))
7046 {
7047 if (!error_p ())
7048 set_fatal_syntax_error (_("invalid floating-point"
7049 " constant"));
7050 goto failure;
7051 }
7052 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7053 inst.base.operands[i].imm.is_fp = 1;
7054 }
7055 break;
7056
7057 case AARCH64_OPND_SVE_I1_HALF_ONE:
7058 case AARCH64_OPND_SVE_I1_HALF_TWO:
7059 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7060 {
7061 int qfloat;
7062 bool dp_p;
7063
7064 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7065 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7066 {
7067 if (!error_p ())
7068 set_fatal_syntax_error (_("invalid floating-point"
7069 " constant"));
7070 goto failure;
7071 }
7072 inst.base.operands[i].imm.value = qfloat;
7073 inst.base.operands[i].imm.is_fp = 1;
7074 }
7075 break;
7076
7077 case AARCH64_OPND_LIMM:
7078 po_misc_or_fail (parse_shifter_operand (&str, info,
7079 SHIFTED_LOGIC_IMM));
7080 if (info->shifter.operator_present)
7081 {
7082 set_fatal_syntax_error
7083 (_("shift not allowed for bitmask immediate"));
7084 goto failure;
7085 }
7086 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7087 /* addr_off_p */ 0,
7088 /* need_libopcodes_p */ 1,
7089 /* skip_p */ 1);
7090 break;
7091
7092 case AARCH64_OPND_AIMM:
7093 if (opcode->op == OP_ADD)
7094 /* ADD may have relocation types. */
7095 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7096 SHIFTED_ARITH_IMM));
7097 else
7098 po_misc_or_fail (parse_shifter_operand (&str, info,
7099 SHIFTED_ARITH_IMM));
7100 switch (inst.reloc.type)
7101 {
7102 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7103 info->shifter.amount = 12;
7104 break;
7105 case BFD_RELOC_UNUSED:
7106 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7107 if (info->shifter.kind != AARCH64_MOD_NONE)
7108 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7109 inst.reloc.pc_rel = 0;
7110 break;
7111 default:
7112 break;
7113 }
7114 info->imm.value = 0;
7115 if (!info->shifter.operator_present)
7116 {
7117 /* Default to LSL if not present. Libopcodes prefers shifter
7118 kind to be explicit. */
7119 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7120 info->shifter.kind = AARCH64_MOD_LSL;
7121 }
7122 break;
7123
7124 case AARCH64_OPND_HALF:
7125 {
7126 /* #<imm16> or relocation. */
7127 int internal_fixup_p;
7128 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7129 if (internal_fixup_p)
7130 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7131 skip_whitespace (str);
7132 if (skip_past_comma (&str))
7133 {
7134 /* {, LSL #<shift>} */
7135 if (! aarch64_gas_internal_fixup_p ())
7136 {
7137 set_fatal_syntax_error (_("can't mix relocation modifier "
7138 "with explicit shift"));
7139 goto failure;
7140 }
7141 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7142 }
7143 else
7144 inst.base.operands[i].shifter.amount = 0;
7145 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7146 inst.base.operands[i].imm.value = 0;
7147 if (! process_movw_reloc_info ())
7148 goto failure;
7149 }
7150 break;
7151
7152 case AARCH64_OPND_EXCEPTION:
7153 case AARCH64_OPND_UNDEFINED:
7154 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7155 imm_reg_type));
7156 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7157 /* addr_off_p */ 0,
7158 /* need_libopcodes_p */ 0,
7159 /* skip_p */ 1);
7160 break;
7161
7162 case AARCH64_OPND_NZCV:
7163 {
7164 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7165 if (nzcv != NULL)
7166 {
7167 str += 4;
7168 info->imm.value = nzcv->value;
7169 break;
7170 }
7171 po_imm_or_fail (0, 15);
7172 info->imm.value = val;
7173 }
7174 break;
7175
7176 case AARCH64_OPND_COND:
7177 case AARCH64_OPND_COND1:
7178 {
7179 char *start = str;
7180 do
7181 str++;
7182 while (ISALPHA (*str));
7183 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7184 if (info->cond == NULL)
7185 {
7186 set_syntax_error (_("invalid condition"));
7187 goto failure;
7188 }
7189 else if (operands[i] == AARCH64_OPND_COND1
7190 && (info->cond->value & 0xe) == 0xe)
7191 {
7192 /* Do not allow AL or NV. */
7193 set_default_error ();
7194 goto failure;
7195 }
7196 }
7197 break;
7198
7199 case AARCH64_OPND_ADDR_ADRP:
7200 po_misc_or_fail (parse_adrp (&str));
7201 /* Clear the value as operand needs to be relocated. */
7202 info->imm.value = 0;
7203 break;
7204
7205 case AARCH64_OPND_ADDR_PCREL14:
7206 case AARCH64_OPND_ADDR_PCREL19:
7207 case AARCH64_OPND_ADDR_PCREL21:
7208 case AARCH64_OPND_ADDR_PCREL26:
7209 po_misc_or_fail (parse_address (&str, info));
7210 if (!info->addr.pcrel)
7211 {
7212 set_syntax_error (_("invalid pc-relative address"));
7213 goto failure;
7214 }
7215 if (inst.gen_lit_pool
7216 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7217 {
7218 /* Only permit "=value" in the literal load instructions.
7219 The literal will be generated by programmer_friendly_fixup. */
7220 set_syntax_error (_("invalid use of \"=immediate\""));
7221 goto failure;
7222 }
7223 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7224 {
7225 set_syntax_error (_("unrecognized relocation suffix"));
7226 goto failure;
7227 }
7228 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7229 {
7230 info->imm.value = inst.reloc.exp.X_add_number;
7231 inst.reloc.type = BFD_RELOC_UNUSED;
7232 }
7233 else
7234 {
7235 info->imm.value = 0;
7236 if (inst.reloc.type == BFD_RELOC_UNUSED)
7237 switch (opcode->iclass)
7238 {
7239 case compbranch:
7240 case condbranch:
7241 /* e.g. CBZ or B.COND */
7242 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7243 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7244 break;
7245 case testbranch:
7246 /* e.g. TBZ */
7247 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7248 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7249 break;
7250 case branch_imm:
7251 /* e.g. B or BL */
7252 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7253 inst.reloc.type =
7254 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7255 : BFD_RELOC_AARCH64_JUMP26;
7256 break;
7257 case loadlit:
7258 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7259 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7260 break;
7261 case pcreladdr:
7262 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7263 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7264 break;
7265 default:
7266 gas_assert (0);
7267 abort ();
7268 }
7269 inst.reloc.pc_rel = 1;
7270 }
7271 break;
7272 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
7273 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
7274 po_misc_or_fail (parse_address (&str, info));
7275 if (info->addr.writeback)
7276 {
7277 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7278 /* addr_off_p */ 1,
7279 /* need_libopcodes_p */ 1,
7280 /* skip_p */ 0);
7281 break;
7282 }
7283 set_syntax_error (_("invalid addressing mode"));
7284 goto failure;
7285 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
7286 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
7287 {
7288 char *start = str;
7289 /* First use the normal address-parsing routines, to get
7290 the usual syntax errors. */
7291 po_misc_or_fail (parse_address (&str, info));
7292 if ((operands[i] == AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
7293 && info->addr.writeback && info->addr.preind)
7294 || (operands[i] == AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND
7295 && info->addr.writeback && info->addr.postind))
7296 {
7297 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7298 /* addr_off_p */ 1,
7299 /* need_libopcodes_p */ 1,
7300 /* skip_p */ 0);
7301
7302 break;
7303 }
7304 if (info->addr.pcrel || info->addr.offset.is_reg
7305 || !info->addr.preind || info->addr.postind
7306 || info->addr.writeback)
7307 {
7308 set_syntax_error (_("invalid addressing mode"));
7309 goto failure;
7310 }
7311 /* Then retry, matching the specific syntax of these addresses. */
7312 str = start;
7313 po_char_or_fail ('[');
7314 po_reg_or_fail (REG_TYPE_R64_SP);
7315 po_char_or_fail (']');
7316 break;
7317 }
7318 case AARCH64_OPND_ADDR_SIMPLE:
7319 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7320 {
7321 /* [<Xn|SP>{, #<simm>}] */
7322 char *start = str;
7323 /* First use the normal address-parsing routines, to get
7324 the usual syntax errors. */
7325 po_misc_or_fail (parse_address (&str, info));
7326 if (info->addr.pcrel || info->addr.offset.is_reg
7327 || !info->addr.preind || info->addr.postind
7328 || info->addr.writeback)
7329 {
7330 set_syntax_error (_("invalid addressing mode"));
7331 goto failure;
7332 }
7333
7334 /* Then retry, matching the specific syntax of these addresses. */
7335 str = start;
7336 po_char_or_fail ('[');
7337 po_reg_or_fail (REG_TYPE_R64_SP);
7338 /* Accept optional ", #0". */
7339 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7340 && skip_past_char (&str, ','))
7341 {
7342 skip_past_char (&str, '#');
7343 if (! skip_past_char (&str, '0'))
7344 {
7345 set_fatal_syntax_error
7346 (_("the optional immediate offset can only be 0"));
7347 goto failure;
7348 }
7349 }
7350 po_char_or_fail (']');
7351 break;
7352 }
7353
7354 case AARCH64_OPND_ADDR_REGOFF:
7355 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7356 po_misc_or_fail (parse_address (&str, info));
7357 regoff_addr:
7358 if (info->addr.pcrel || !info->addr.offset.is_reg
7359 || !info->addr.preind || info->addr.postind
7360 || info->addr.writeback)
7361 {
7362 set_syntax_error (_("invalid addressing mode"));
7363 goto failure;
7364 }
7365 if (!info->shifter.operator_present)
7366 {
7367 /* Default to LSL if not present. Libopcodes prefers shifter
7368 kind to be explicit. */
7369 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7370 info->shifter.kind = AARCH64_MOD_LSL;
7371 }
7372 /* Qualifier to be deduced by libopcodes. */
7373 break;
7374
7375 case AARCH64_OPND_ADDR_SIMM7:
7376 po_misc_or_fail (parse_address (&str, info));
7377 if (info->addr.pcrel || info->addr.offset.is_reg
7378 || (!info->addr.preind && !info->addr.postind))
7379 {
7380 set_syntax_error (_("invalid addressing mode"));
7381 goto failure;
7382 }
7383 if (inst.reloc.type != BFD_RELOC_UNUSED)
7384 {
7385 set_syntax_error (_("relocation not allowed"));
7386 goto failure;
7387 }
7388 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7389 /* addr_off_p */ 1,
7390 /* need_libopcodes_p */ 1,
7391 /* skip_p */ 0);
7392 break;
7393
7394 case AARCH64_OPND_ADDR_SIMM9:
7395 case AARCH64_OPND_ADDR_SIMM9_2:
7396 case AARCH64_OPND_ADDR_SIMM11:
7397 case AARCH64_OPND_ADDR_SIMM13:
7398 po_misc_or_fail (parse_address (&str, info));
7399 if (info->addr.pcrel || info->addr.offset.is_reg
7400 || (!info->addr.preind && !info->addr.postind)
7401 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7402 && info->addr.writeback))
7403 {
7404 set_syntax_error (_("invalid addressing mode"));
7405 goto failure;
7406 }
7407 if (inst.reloc.type != BFD_RELOC_UNUSED)
7408 {
7409 set_syntax_error (_("relocation not allowed"));
7410 goto failure;
7411 }
7412 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7413 /* addr_off_p */ 1,
7414 /* need_libopcodes_p */ 1,
7415 /* skip_p */ 0);
7416 break;
7417
7418 case AARCH64_OPND_ADDR_SIMM10:
7419 case AARCH64_OPND_ADDR_OFFSET:
7420 po_misc_or_fail (parse_address (&str, info));
7421 if (info->addr.pcrel || info->addr.offset.is_reg
7422 || !info->addr.preind || info->addr.postind)
7423 {
7424 set_syntax_error (_("invalid addressing mode"));
7425 goto failure;
7426 }
7427 if (inst.reloc.type != BFD_RELOC_UNUSED)
7428 {
7429 set_syntax_error (_("relocation not allowed"));
7430 goto failure;
7431 }
7432 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7433 /* addr_off_p */ 1,
7434 /* need_libopcodes_p */ 1,
7435 /* skip_p */ 0);
7436 break;
7437
7438 case AARCH64_OPND_RCPC3_ADDR_OFFSET:
7439 po_misc_or_fail (parse_address (&str, info));
7440 if (info->addr.pcrel || info->addr.offset.is_reg
7441 || !info->addr.preind || info->addr.postind
7442 || info->addr.writeback)
7443 {
7444 set_syntax_error (_("invalid addressing mode"));
7445 goto failure;
7446 }
7447 if (inst.reloc.type != BFD_RELOC_UNUSED)
7448 {
7449 set_syntax_error (_("relocation not allowed"));
7450 goto failure;
7451 }
7452 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7453 /* addr_off_p */ 1,
7454 /* need_libopcodes_p */ 1,
7455 /* skip_p */ 0);
7456 break;
7457
7458 case AARCH64_OPND_ADDR_UIMM12:
7459 po_misc_or_fail (parse_address (&str, info));
7460 if (info->addr.pcrel || info->addr.offset.is_reg
7461 || !info->addr.preind || info->addr.writeback)
7462 {
7463 set_syntax_error (_("invalid addressing mode"));
7464 goto failure;
7465 }
7466 if (inst.reloc.type == BFD_RELOC_UNUSED)
7467 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7468 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7469 || (inst.reloc.type
7470 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7471 || (inst.reloc.type
7472 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7473 || (inst.reloc.type
7474 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7475 || (inst.reloc.type
7476 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7477 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7478 /* Leave qualifier to be determined by libopcodes. */
7479 break;
7480
7481 case AARCH64_OPND_SIMD_ADDR_POST:
7482 /* [<Xn|SP>], <Xm|#<amount>> */
7483 po_misc_or_fail (parse_address (&str, info));
7484 if (!info->addr.postind || !info->addr.writeback)
7485 {
7486 set_syntax_error (_("invalid addressing mode"));
7487 goto failure;
7488 }
7489 if (!info->addr.offset.is_reg)
7490 {
7491 if (inst.reloc.exp.X_op == O_constant)
7492 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7493 else
7494 {
7495 set_fatal_syntax_error
7496 (_("writeback value must be an immediate constant"));
7497 goto failure;
7498 }
7499 }
7500 /* No qualifier. */
7501 break;
7502
7503 case AARCH64_OPND_SME_SM_ZA:
7504 /* { SM | ZA } */
7505 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7506 {
7507 set_syntax_error (_("unknown or missing PSTATE field name"));
7508 goto failure;
7509 }
7510 info->reg.regno = val;
7511 break;
7512
7513 case AARCH64_OPND_SME_PnT_Wm_imm:
7514 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7515 &info->indexed_za, &qualifier, 0))
7516 goto failure;
7517 info->qualifier = qualifier;
7518 break;
7519
7520 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7521 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7522 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7523 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7524 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7525 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7526 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7527 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7528 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7529 case AARCH64_OPND_SVE_ADDR_RI_U6:
7530 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7531 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7532 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7533 /* [X<n>{, #imm, MUL VL}]
7534 [X<n>{, #imm}]
7535 but recognizing SVE registers. */
7536 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7537 &offset_qualifier));
7538 if (base_qualifier != AARCH64_OPND_QLF_X)
7539 {
7540 set_syntax_error (_("invalid addressing mode"));
7541 goto failure;
7542 }
7543 sve_regimm:
7544 if (info->addr.pcrel || info->addr.offset.is_reg
7545 || !info->addr.preind || info->addr.writeback)
7546 {
7547 set_syntax_error (_("invalid addressing mode"));
7548 goto failure;
7549 }
7550 if (inst.reloc.type != BFD_RELOC_UNUSED
7551 || inst.reloc.exp.X_op != O_constant)
7552 {
7553 /* Make sure this has priority over
7554 "invalid addressing mode". */
7555 set_fatal_syntax_error (_("constant offset required"));
7556 goto failure;
7557 }
7558 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7559 break;
7560
7561 case AARCH64_OPND_SVE_ADDR_R:
7562 /* [<Xn|SP>{, <R><m>}]
7563 but recognizing SVE registers. */
7564 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7565 &offset_qualifier));
7566 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7567 {
7568 offset_qualifier = AARCH64_OPND_QLF_X;
7569 info->addr.offset.is_reg = 1;
7570 info->addr.offset.regno = 31;
7571 }
7572 else if (base_qualifier != AARCH64_OPND_QLF_X
7573 || offset_qualifier != AARCH64_OPND_QLF_X)
7574 {
7575 set_syntax_error (_("invalid addressing mode"));
7576 goto failure;
7577 }
7578 goto regoff_addr;
7579
7580 case AARCH64_OPND_SVE_ADDR_RR:
7581 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7582 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7583 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7584 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7585 case AARCH64_OPND_SVE_ADDR_RX:
7586 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7587 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7588 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7589 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7590 but recognizing SVE registers. */
7591 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7592 &offset_qualifier));
7593 if (base_qualifier != AARCH64_OPND_QLF_X
7594 || offset_qualifier != AARCH64_OPND_QLF_X)
7595 {
7596 set_syntax_error (_("invalid addressing mode"));
7597 goto failure;
7598 }
7599 goto regoff_addr;
7600
7601 case AARCH64_OPND_SVE_ADDR_RZ:
7602 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7603 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7604 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7605 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7606 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7607 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7608 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7609 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7610 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7611 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7612 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7613 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7614 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7615 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7616 &offset_qualifier));
7617 if (base_qualifier != AARCH64_OPND_QLF_X
7618 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7619 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7620 {
7621 set_syntax_error (_("invalid addressing mode"));
7622 goto failure;
7623 }
7624 info->qualifier = offset_qualifier;
7625 goto regoff_addr;
7626
7627 case AARCH64_OPND_SVE_ADDR_ZX:
7628 /* [Zn.<T>{, <Xm>}]. */
7629 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7630 &offset_qualifier));
7631 /* Things to check:
7632 base_qualifier either S_S or S_D
7633 offset_qualifier must be X
7634 */
7635 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7636 && base_qualifier != AARCH64_OPND_QLF_S_D)
7637 || offset_qualifier != AARCH64_OPND_QLF_X)
7638 {
7639 set_syntax_error (_("invalid addressing mode"));
7640 goto failure;
7641 }
7642 info->qualifier = base_qualifier;
7643 if (!info->addr.offset.is_reg || info->addr.pcrel
7644 || !info->addr.preind || info->addr.writeback
7645 || info->shifter.operator_present != 0)
7646 {
7647 set_syntax_error (_("invalid addressing mode"));
7648 goto failure;
7649 }
7650 info->shifter.kind = AARCH64_MOD_LSL;
7651 break;
7652
7653
7654 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7655 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7656 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7657 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7658 /* [Z<n>.<T>{, #imm}] */
7659 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7660 &offset_qualifier));
7661 if (base_qualifier != AARCH64_OPND_QLF_S_S
7662 && base_qualifier != AARCH64_OPND_QLF_S_D)
7663 {
7664 set_syntax_error (_("invalid addressing mode"));
7665 goto failure;
7666 }
7667 info->qualifier = base_qualifier;
7668 goto sve_regimm;
7669
7670 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7671 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7672 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7673 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7674 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7675
7676 We don't reject:
7677
7678 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7679
7680 here since we get better error messages by leaving it to
7681 the qualifier checking routines. */
7682 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7683 &offset_qualifier));
7684 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7685 && base_qualifier != AARCH64_OPND_QLF_S_D)
7686 || offset_qualifier != base_qualifier)
7687 {
7688 set_syntax_error (_("invalid addressing mode"));
7689 goto failure;
7690 }
7691 info->qualifier = base_qualifier;
7692 goto regoff_addr;
7693 case AARCH64_OPND_SYSREG:
7694 case AARCH64_OPND_SYSREG128:
7695 {
7696 bool sysreg128_p = operands[i] == AARCH64_OPND_SYSREG128;
7697 uint32_t sysreg_flags;
7698 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7699 &sysreg_flags,
7700 sysreg128_p)) == PARSE_FAIL)
7701 {
7702 set_syntax_error (_("unknown or missing system register name"));
7703 goto failure;
7704 }
7705 inst.base.operands[i].sysreg.value = val;
7706 inst.base.operands[i].sysreg.flags = sysreg_flags;
7707 break;
7708 }
7709
7710 case AARCH64_OPND_PSTATEFIELD:
7711 {
7712 uint32_t sysreg_flags;
7713 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7714 &sysreg_flags, false)) == PARSE_FAIL)
7715 {
7716 set_syntax_error (_("unknown or missing PSTATE field name"));
7717 goto failure;
7718 }
7719 inst.base.operands[i].pstatefield = val;
7720 inst.base.operands[i].sysreg.flags = sysreg_flags;
7721 break;
7722 }
7723
7724 case AARCH64_OPND_SYSREG_IC:
7725 inst.base.operands[i].sysins_op =
7726 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh, false);
7727 goto sys_reg_ins;
7728
7729 case AARCH64_OPND_SYSREG_DC:
7730 inst.base.operands[i].sysins_op =
7731 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh, false);
7732 goto sys_reg_ins;
7733
7734 case AARCH64_OPND_SYSREG_AT:
7735 inst.base.operands[i].sysins_op =
7736 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh, false);
7737 goto sys_reg_ins;
7738
7739 case AARCH64_OPND_SYSREG_SR:
7740 inst.base.operands[i].sysins_op =
7741 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh, false);
7742 goto sys_reg_ins;
7743
7744 case AARCH64_OPND_SYSREG_TLBI:
7745 inst.base.operands[i].sysins_op =
7746 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh, false);
7747 goto sys_reg_ins;
7748
7749 case AARCH64_OPND_SYSREG_TLBIP:
7750 inst.base.operands[i].sysins_op =
7751 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh, true);
7752 sys_reg_ins:
7753 if (inst.base.operands[i].sysins_op == NULL)
7754 {
7755 set_fatal_syntax_error ( _("unknown or missing operation name"));
7756 goto failure;
7757 }
7758 break;
7759
7760 case AARCH64_OPND_BARRIER:
7761 case AARCH64_OPND_BARRIER_ISB:
7762 val = parse_barrier (&str);
7763 if (val != PARSE_FAIL
7764 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7765 {
7766 /* ISB only accepts options name 'sy'. */
7767 set_syntax_error
7768 (_("the specified option is not accepted in ISB"));
7769 /* Turn off backtrack as this optional operand is present. */
7770 backtrack_pos = 0;
7771 goto failure;
7772 }
7773 if (val != PARSE_FAIL
7774 && operands[i] == AARCH64_OPND_BARRIER)
7775 {
7776 /* Regular barriers accept options CRm (C0-C15).
7777 DSB nXS barrier variant accepts values > 15. */
7778 if (val < 0 || val > 15)
7779 {
7780 set_syntax_error (_("the specified option is not accepted in DSB"));
7781 goto failure;
7782 }
7783 }
7784 /* This is an extension to accept a 0..15 immediate. */
7785 if (val == PARSE_FAIL)
7786 po_imm_or_fail (0, 15);
7787 info->barrier = aarch64_barrier_options + val;
7788 break;
7789
7790 case AARCH64_OPND_BARRIER_DSB_NXS:
7791 val = parse_barrier (&str);
7792 if (val != PARSE_FAIL)
7793 {
7794 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7795 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7796 {
7797 set_syntax_error (_("the specified option is not accepted in DSB"));
7798 /* Turn off backtrack as this optional operand is present. */
7799 backtrack_pos = 0;
7800 goto failure;
7801 }
7802 }
7803 else
7804 {
7805 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7806 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7807 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7808 goto failure;
7809 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7810 {
7811 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7812 goto failure;
7813 }
7814 }
7815 /* Option index is encoded as 2-bit value in val<3:2>. */
7816 val = (val >> 2) - 4;
7817 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7818 break;
7819
7820 case AARCH64_OPND_PRFOP:
7821 val = parse_pldop (&str);
7822 /* This is an extension to accept a 0..31 immediate. */
7823 if (val == PARSE_FAIL)
7824 po_imm_or_fail (0, 31);
7825 inst.base.operands[i].prfop = aarch64_prfops + val;
7826 break;
7827
7828 case AARCH64_OPND_RPRFMOP:
7829 po_enum_or_fail (aarch64_rprfmop_array);
7830 info->imm.value = val;
7831 break;
7832
7833 case AARCH64_OPND_BARRIER_PSB:
7834 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7835 goto failure;
7836 break;
7837
7838 case AARCH64_OPND_SME_ZT0:
7839 po_reg_or_fail (REG_TYPE_ZT0);
7840 break;
7841
7842 case AARCH64_OPND_SME_ZT0_INDEX:
7843 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7844 if (!reg || vectype.type != NT_invtype)
7845 goto failure;
7846 if (!(vectype.defined & NTA_HASINDEX))
7847 {
7848 set_syntax_error (_("missing register index"));
7849 goto failure;
7850 }
7851 info->imm.value = vectype.index;
7852 break;
7853
7854 case AARCH64_OPND_SME_ZT0_LIST:
7855 if (*str != '{')
7856 {
7857 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7858 goto failure;
7859 }
7860 str++;
7861 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7862 goto failure;
7863 if (*str != '}')
7864 {
7865 set_syntax_error (_("expected '}' after ZT0"));
7866 goto failure;
7867 }
7868 str++;
7869 break;
7870
7871 case AARCH64_OPND_SME_PNn3_INDEX1:
7872 case AARCH64_OPND_SME_PNn3_INDEX2:
7873 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7874 if (!reg)
7875 goto failure;
7876 if (!(vectype.defined & NTA_HASINDEX))
7877 {
7878 set_syntax_error (_("missing register index"));
7879 goto failure;
7880 }
7881 info->reglane.regno = reg->number;
7882 info->reglane.index = vectype.index;
7883 if (vectype.type == NT_invtype)
7884 info->qualifier = AARCH64_OPND_QLF_NIL;
7885 else
7886 info->qualifier = vectype_to_qualifier (&vectype);
7887 break;
7888
7889 case AARCH64_OPND_BARRIER_GCSB:
7890 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7891 goto failure;
7892 break;
7893
7894 case AARCH64_OPND_BTI_TARGET:
7895 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7896 goto failure;
7897 break;
7898
7899 case AARCH64_OPND_SME_ZAda_2b:
7900 case AARCH64_OPND_SME_ZAda_3b:
7901 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7902 if (!reg)
7903 goto failure;
7904 info->reg.regno = reg->number;
7905 info->qualifier = qualifier;
7906 break;
7907
7908 case AARCH64_OPND_SME_ZA_HV_idx_src:
7909 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7910 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7911 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7912 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7913 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7914 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7915 &info->indexed_za,
7916 &qualifier)
7917 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7918 &info->indexed_za, &qualifier, 0))
7919 goto failure;
7920 info->qualifier = qualifier;
7921 break;
7922
7923 case AARCH64_OPND_SME_list_of_64bit_tiles:
7924 val = parse_sme_list_of_64bit_tiles (&str);
7925 if (val == PARSE_FAIL)
7926 goto failure;
7927 info->imm.value = val;
7928 break;
7929
7930 case AARCH64_OPND_SME_ZA_array_off1x4:
7931 case AARCH64_OPND_SME_ZA_array_off2x2:
7932 case AARCH64_OPND_SME_ZA_array_off2x4:
7933 case AARCH64_OPND_SME_ZA_array_off3_0:
7934 case AARCH64_OPND_SME_ZA_array_off3_5:
7935 case AARCH64_OPND_SME_ZA_array_off3x2:
7936 case AARCH64_OPND_SME_ZA_array_off4:
7937 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7938 &info->indexed_za, &qualifier, 0))
7939 goto failure;
7940 info->qualifier = qualifier;
7941 break;
7942
7943 case AARCH64_OPND_SME_ZA_array_vrsb_1:
7944 case AARCH64_OPND_SME_ZA_array_vrsh_1:
7945 case AARCH64_OPND_SME_ZA_array_vrss_1:
7946 case AARCH64_OPND_SME_ZA_array_vrsd_1:
7947 case AARCH64_OPND_SME_ZA_array_vrsb_2:
7948 case AARCH64_OPND_SME_ZA_array_vrsh_2:
7949 case AARCH64_OPND_SME_ZA_array_vrss_2:
7950 case AARCH64_OPND_SME_ZA_array_vrsd_2:
7951 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7952 &info->indexed_za, &qualifier, 0))
7953 goto failure;
7954 info->qualifier = qualifier;
7955 break;
7956
7957
7958 case AARCH64_OPND_SME_VLxN_10:
7959 case AARCH64_OPND_SME_VLxN_13:
7960 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7961 info->imm.value = val;
7962 break;
7963
7964 case AARCH64_OPND_MOPS_ADDR_Rd:
7965 case AARCH64_OPND_MOPS_ADDR_Rs:
7966 po_char_or_fail ('[');
7967 if (!parse_x0_to_x30 (&str, info))
7968 goto failure;
7969 po_char_or_fail (']');
7970 po_char_or_fail ('!');
7971 break;
7972
7973 case AARCH64_OPND_MOPS_WB_Rn:
7974 if (!parse_x0_to_x30 (&str, info))
7975 goto failure;
7976 po_char_or_fail ('!');
7977 break;
7978
7979 case AARCH64_OPND_LSE128_Rt:
7980 case AARCH64_OPND_LSE128_Rt2:
7981 po_int_fp_reg_or_fail (REG_TYPE_R_64);
7982 break;
7983
7984 default:
7985 as_fatal (_("unhandled operand code %d"), operands[i]);
7986 }
7987
7988 /* If we get here, this operand was successfully parsed. */
7989 inst.base.operands[i].present = 1;
7990
      /* As instructions can have multiple optional operands, it is important to
7992 reset the backtrack_pos variable once we finish processing an operand
7993 successfully. */
7994 backtrack_pos = 0;
7995
7996 continue;
7997
7998 failure:
7999 /* The parse routine should already have set the error, but in case
8000 not, set a default one here. */
8001 if (! error_p ())
8002 set_default_error ();
8003
8004 if (! backtrack_pos)
8005 goto parse_operands_return;
8006
8007 {
8008 /* We reach here because this operand is marked as optional, and
8009 either no operand was supplied or the operand was supplied but it
8010 was syntactically incorrect. In the latter case we report an
8011 error. In the former case we perform a few more checks before
8012 dropping through to the code to insert the default operand. */
8013
8014 char *tmp = backtrack_pos;
8015 char endchar = END_OF_INSN;
8016
8017 skip_past_char (&tmp, ',');
8018
8019 if (*tmp != endchar)
8020 /* The user has supplied an operand in the wrong format. */
8021 goto parse_operands_return;
8022
8023 /* Make sure there is not a comma before the optional operand.
8024 For example the fifth operand of 'sys' is optional:
8025
8026 sys #0,c0,c0,#0, <--- wrong
8027 sys #0,c0,c0,#0 <--- correct. */
8028 if (comma_skipped_p && i && endchar == END_OF_INSN)
8029 {
8030 set_fatal_syntax_error
8031 (_("unexpected comma before the omitted optional operand"));
8032 goto parse_operands_return;
8033 }
8034 }
8035
8036 /* Reaching here means we are dealing with an optional operand that is
8037 omitted from the assembly line. */
8038 gas_assert (optional_operand_p (opcode, i));
8039 info->present = 0;
8040 process_omitted_operand (operands[i], opcode, i, info);
8041
8042 /* Try again, skipping the optional operand at backtrack_pos. */
8043 str = backtrack_pos;
8044 backtrack_pos = 0;
8045
8046 /* Clear any error record after the omitted optional operand has been
8047 successfully handled. */
8048 clear_error ();
8049 }
8050
8051 /* Check if we have parsed all the operands. */
8052 if (*str != '\0' && ! error_p ())
8053 {
8054 /* Set I to the index of the last present operand; this is
8055 for the purpose of diagnostics. */
8056 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
8057 ;
8058 set_fatal_syntax_error
8059 (_("unexpected characters following instruction"));
8060 }
8061
8062 parse_operands_return:
8063
8064 if (error_p ())
8065 {
8066 inst.parsing_error.index = i;
8067 DEBUG_TRACE ("parsing FAIL: %s - %s",
8068 operand_mismatch_kind_names[inst.parsing_error.kind],
8069 inst.parsing_error.error);
8070 /* Record the operand error properly; this is useful when there
8071 are multiple instruction templates for a mnemonic name, so that
8072 later on, we can select the error that most closely describes
8073 the problem. */
8074 record_operand_error_info (opcode, &inst.parsing_error);
8075 return false;
8076 }
8077 else
8078 {
8079 DEBUG_TRACE ("parsing SUCCESS");
8080 return true;
8081 }
8082 }
8083
8084 /* It does some fix-up to provide some programmer friendly feature while
8085 keeping the libopcodes happy, i.e. libopcodes only accepts
8086 the preferred architectural syntax.
8087 Return FALSE if there is any failure; otherwise return TRUE. */
8088
8089 static bool
programmer_friendly_fixup(aarch64_instruction * instr)8090 programmer_friendly_fixup (aarch64_instruction *instr)
8091 {
8092 aarch64_inst *base = &instr->base;
8093 const aarch64_opcode *opcode = base->opcode;
8094 enum aarch64_op op = opcode->op;
8095 aarch64_opnd_info *operands = base->operands;
8096
8097 DEBUG_TRACE ("enter");
8098
8099 switch (opcode->iclass)
8100 {
8101 case testbranch:
8102 /* TBNZ Xn|Wn, #uimm6, label
8103 Test and Branch Not Zero: conditionally jumps to label if bit number
8104 uimm6 in register Xn is not zero. The bit number implies the width of
8105 the register, which may be written and should be disassembled as Wn if
8106 uimm is less than 32. */
8107 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
8108 {
8109 if (operands[1].imm.value >= 32)
8110 {
8111 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
8112 0, 31);
8113 return false;
8114 }
8115 operands[0].qualifier = AARCH64_OPND_QLF_X;
8116 }
8117 break;
8118 case loadlit:
8119 /* LDR Wt, label | =value
8120 As a convenience assemblers will typically permit the notation
8121 "=value" in conjunction with the pc-relative literal load instructions
8122 to automatically place an immediate value or symbolic address in a
8123 nearby literal pool and generate a hidden label which references it.
8124 ISREG has been set to 0 in the case of =value. */
8125 if (instr->gen_lit_pool
8126 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
8127 {
8128 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
8129 if (op == OP_LDRSW_LIT)
8130 size = 4;
8131 if (instr->reloc.exp.X_op != O_constant
8132 && instr->reloc.exp.X_op != O_big
8133 && instr->reloc.exp.X_op != O_symbol)
8134 {
8135 record_operand_error (opcode, 1,
8136 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
8137 _("constant expression expected"));
8138 return false;
8139 }
8140 if (! add_to_lit_pool (&instr->reloc.exp, size))
8141 {
8142 record_operand_error (opcode, 1,
8143 AARCH64_OPDE_OTHER_ERROR,
8144 _("literal pool insertion failed"));
8145 return false;
8146 }
8147 }
8148 break;
8149 case log_shift:
8150 case bitfield:
8151 /* UXT[BHW] Wd, Wn
8152 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
8153 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
8154 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
8155 A programmer-friendly assembler should accept a destination Xd in
8156 place of Wd, however that is not the preferred form for disassembly.
8157 */
8158 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
8159 && operands[1].qualifier == AARCH64_OPND_QLF_W
8160 && operands[0].qualifier == AARCH64_OPND_QLF_X)
8161 operands[0].qualifier = AARCH64_OPND_QLF_W;
8162 break;
8163
8164 case addsub_ext:
8165 {
8166 /* In the 64-bit form, the final register operand is written as Wm
8167 for all but the (possibly omitted) UXTX/LSL and SXTX
8168 operators.
8169 As a programmer-friendly assembler, we accept e.g.
8170 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
8171 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
8172 int idx = aarch64_operand_index (opcode->operands,
8173 AARCH64_OPND_Rm_EXT);
8174 gas_assert (idx == 1 || idx == 2);
8175 if (operands[0].qualifier == AARCH64_OPND_QLF_X
8176 && operands[idx].qualifier == AARCH64_OPND_QLF_X
8177 && operands[idx].shifter.kind != AARCH64_MOD_LSL
8178 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
8179 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
8180 operands[idx].qualifier = AARCH64_OPND_QLF_W;
8181 }
8182 break;
8183
8184 default:
8185 break;
8186 }
8187
8188 DEBUG_TRACE ("exit with SUCCESS");
8189 return true;
8190 }
8191
8192 /* Check for loads and stores that will cause unpredictable behavior. */
8193
8194 static void
warn_unpredictable_ldst(aarch64_instruction * instr,char * str)8195 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
8196 {
8197 aarch64_inst *base = &instr->base;
8198 const aarch64_opcode *opcode = base->opcode;
8199 const aarch64_opnd_info *opnds = base->operands;
8200 switch (opcode->iclass)
8201 {
8202 case ldst_pos:
8203 case ldst_imm9:
8204 case ldst_imm10:
8205 case ldst_unscaled:
8206 case ldst_unpriv:
8207 /* Loading/storing the base register is unpredictable if writeback. */
8208 if ((aarch64_get_operand_class (opnds[0].type)
8209 == AARCH64_OPND_CLASS_INT_REG)
8210 && opnds[0].reg.regno == opnds[1].addr.base_regno
8211 && opnds[1].addr.base_regno != REG_SP
8212 /* Exempt STG/STZG/ST2G/STZ2G. */
8213 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
8214 && opnds[1].addr.writeback)
8215 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
8216 break;
8217
8218 case ldstpair_off:
8219 case ldstnapair_offs:
8220 case ldstpair_indexed:
8221 /* Loading/storing the base register is unpredictable if writeback. */
8222 if ((aarch64_get_operand_class (opnds[0].type)
8223 == AARCH64_OPND_CLASS_INT_REG)
8224 && (opnds[0].reg.regno == opnds[2].addr.base_regno
8225 || opnds[1].reg.regno == opnds[2].addr.base_regno)
8226 && opnds[2].addr.base_regno != REG_SP
8227 /* Exempt STGP. */
8228 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
8229 && opnds[2].addr.writeback)
8230 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
8231 /* Load operations must load different registers. */
8232 if ((opcode->opcode & (1 << 22))
8233 && opnds[0].reg.regno == opnds[1].reg.regno)
8234 as_warn (_("unpredictable load of register pair -- `%s'"), str);
8235 break;
8236
8237 case ldstexcl:
8238 if ((aarch64_get_operand_class (opnds[0].type)
8239 == AARCH64_OPND_CLASS_INT_REG)
8240 && (aarch64_get_operand_class (opnds[1].type)
8241 == AARCH64_OPND_CLASS_INT_REG))
8242 {
8243 if ((opcode->opcode & (1 << 22)))
8244 {
8245 /* It is unpredictable if load-exclusive pair with Rt == Rt2. */
8246 if ((opcode->opcode & (1 << 21))
8247 && opnds[0].reg.regno == opnds[1].reg.regno)
8248 as_warn (_("unpredictable load of register pair -- `%s'"), str);
8249 }
8250 else
8251 {
8252 /* Store-Exclusive is unpredictable if Rt == Rs. */
8253 if (opnds[0].reg.regno == opnds[1].reg.regno)
8254 as_warn
8255 (_("unpredictable: identical transfer and status registers"
8256 " --`%s'"),str);
8257
8258 if (opnds[0].reg.regno == opnds[2].reg.regno)
8259 {
8260 if (!(opcode->opcode & (1 << 21)))
8261 /* Store-Exclusive is unpredictable if Rn == Rs. */
8262 as_warn
8263 (_("unpredictable: identical base and status registers"
8264 " --`%s'"),str);
8265 else
8266 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
8267 as_warn
8268 (_("unpredictable: "
8269 "identical transfer and status registers"
8270 " --`%s'"),str);
8271 }
8272
8273 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
8274 if ((opcode->opcode & (1 << 21))
8275 && opnds[0].reg.regno == opnds[3].reg.regno
8276 && opnds[3].reg.regno != REG_SP)
8277 as_warn (_("unpredictable: identical base and status registers"
8278 " --`%s'"),str);
8279 }
8280 }
8281 break;
8282
8283 default:
8284 break;
8285 }
8286 }
8287
8288 static void
force_automatic_sequence_close(void)8289 force_automatic_sequence_close (void)
8290 {
8291 struct aarch64_segment_info_type *tc_seg_info;
8292
8293 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8294 if (tc_seg_info->insn_sequence.instr)
8295 {
8296 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8297 _("previous `%s' sequence has not been closed"),
8298 tc_seg_info->insn_sequence.instr->opcode->name);
8299 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8300 }
8301 }
8302
8303 /* A wrapper function to interface with libopcodes on encoding and
8304 record the error message if there is any.
8305
8306 Return TRUE on success; otherwise return FALSE. */
8307
8308 static bool
do_encode(const aarch64_opcode * opcode,aarch64_inst * instr,aarch64_insn * code)8309 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8310 aarch64_insn *code)
8311 {
8312 aarch64_operand_error error_info;
8313 memset (&error_info, '\0', sizeof (error_info));
8314 error_info.kind = AARCH64_OPDE_NIL;
8315 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8316 && !error_info.non_fatal)
8317 return true;
8318
8319 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8320 record_operand_error_info (opcode, &error_info);
8321 return error_info.non_fatal;
8322 }
8323
8324 #ifdef DEBUG_AARCH64
8325 static inline void
dump_opcode_operands(const aarch64_opcode * opcode)8326 dump_opcode_operands (const aarch64_opcode *opcode)
8327 {
8328 int i = 0;
8329 while (opcode->operands[i] != AARCH64_OPND_NIL)
8330 {
8331 aarch64_verbose ("\t\t opnd%d: %s", i,
8332 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8333 ? aarch64_get_operand_name (opcode->operands[i])
8334 : aarch64_get_operand_desc (opcode->operands[i]));
8335 ++i;
8336 }
8337 }
8338 #endif /* DEBUG_AARCH64 */
8339
8340 /* This is the guts of the machine-dependent assembler. STR points to a
8341 machine dependent instruction. This function is supposed to emit
8342 the frags/bytes it assembles to. */
8343
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  /* Remember where this instruction is, for later diagnostics about
     sequences that are left open.  */
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for condition or
     qualifier suffix handling in opcode_lookup.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dot-less statement may be a register alias definition (.req).  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears the whole of INST, so keep the
     condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly transformations, then encode;
	 a failure at any stage falls through to try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  /* Start afresh for the next candidate opcode, again keeping
	     the parsed condition.  */
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8494
8495 /* Various frobbings of labels and their addresses. */
8496
void
aarch64_start_line_hook (void)
{
  /* Forget the label from the previous line; md_assemble only re-anchors
     a label seen on the current line.  */
  last_label_seen = NULL;
}
8502
void
aarch64_frob_label (symbolS * sym)
{
  /* Record the label so md_assemble can re-anchor it to the (possibly
     aligned) start of the next instruction.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8510
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close; if an instruction sequence
     is still open, force_automatic_sequence_close warns and resets it.  */
  force_automatic_sequence_close ();
}
8517
8518 int
aarch64_data_in_code(void)8519 aarch64_data_in_code (void)
8520 {
8521 if (startswith (input_line_pointer + 1, "data:"))
8522 {
8523 *input_line_pointer = '/';
8524 input_line_pointer += 5;
8525 *input_line_pointer = 0;
8526 return 1;
8527 }
8528
8529 return 0;
8530 }
8531
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Strip a trailing "/data" marker (attached by aarch64_data_in_code)
     so the symbol keeps its original name.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8542
8543 /* Table of all register names defined by default. The user can
8544 define additional names with .req. Note that all register names
8545 should appear in both upper and lowercase variants. Some registers
8546 also have mixed-case names. */
8547
8548 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8549 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8550 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8551 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8552 #define REGSET16(p,t) \
8553 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8554 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8555 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8556 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8557 #define REGSET16S(p,s,t) \
8558 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8559 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8560 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8561 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8562 #define REGSET31(p,t) \
8563 REGSET16(p, t), \
8564 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8565 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8566 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8567 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8568 #define REGSET(p,t) \
8569 REGSET31(p,t), REGNUM(p,31,t)
8570
8571 /* These go into aarch64_reg_hsh hash-table. */
8572 static const reg_entry reg_names[] = {
8573 /* Integer registers. */
8574 REGSET31 (x, R_64), REGSET31 (X, R_64),
8575 REGSET31 (w, R_32), REGSET31 (W, R_32),
8576
8577 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8578 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8579 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8580 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8581 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8582 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8583
8584 REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
8585 REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),
8586
8587 /* Floating-point single precision registers. */
8588 REGSET (s, FP_S), REGSET (S, FP_S),
8589
8590 /* Floating-point double precision registers. */
8591 REGSET (d, FP_D), REGSET (D, FP_D),
8592
8593 /* Floating-point half precision registers. */
8594 REGSET (h, FP_H), REGSET (H, FP_H),
8595
8596 /* Floating-point byte precision registers. */
8597 REGSET (b, FP_B), REGSET (B, FP_B),
8598
8599 /* Floating-point quad precision registers. */
8600 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8601
8602 /* FP/SIMD registers. */
8603 REGSET (v, V), REGSET (V, V),
8604
8605 /* SVE vector registers. */
8606 REGSET (z, Z), REGSET (Z, Z),
8607
8608 /* SVE predicate(-as-mask) registers. */
8609 REGSET16 (p, P), REGSET16 (P, P),
8610
8611 /* SVE predicate-as-counter registers. */
8612 REGSET16 (pn, PN), REGSET16 (PN, PN),
8613
8614 /* SME ZA. We model this as a register because it acts syntactically
8615 like ZA0H, supporting qualifier suffixes and indexing. */
8616 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8617
8618 /* SME ZA tile registers. */
8619 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8620
8621 /* SME ZA tile registers (horizontal slice). */
8622 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8623
8624 /* SME ZA tile registers (vertical slice). */
8625 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),
8626
8627 /* SME2 ZT0. */
8628 REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
8629 };
8630
8631 #undef REGDEF
8632 #undef REGDEF_ALIAS
8633 #undef REGNUM
8634 #undef REGSET16
8635 #undef REGSET31
8636 #undef REGSET
8637
/* Single-bit values for each condition flag: an upper-case letter means
   the flag is set, lower-case means clear.  B packs them into the 4-bit
   NZCV immediate, N being the most significant bit.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV flags operand, each mapped to its 4-bit
   immediate value; the case of each letter selects the bit.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* Undefine the single-letter helpers so they cannot leak further.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8675
8676 /* MD interface: bits in the object file. */
8677
8678 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8679 for use in the a.out file, and stores them in the array pointed to by buf.
8680 This knows about the endian-ness of the target machine and does
8681 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8682 2 (short) and 4 (long) Floating numbers are put out as a series of
8683 LITTLENUMS (shorts, here at least). */
8684
8685 void
md_number_to_chars(char * buf,valueT val,int n)8686 md_number_to_chars (char *buf, valueT val, int n)
8687 {
8688 if (target_big_endian)
8689 number_to_chars_bigendian (buf, val, n);
8690 else
8691 number_to_chars_littleendian (buf, val, n);
8692 }
8693
8694 /* MD interface: Sections. */
8695
8696 /* Estimate the size of a frag before relaxing. Assume everything fits in
8697 4 bytes. */
8698
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Assume a fixed 4-byte size and record it as the frag's variable
     part as well.  */
  fragp->fr_var = 4;
  return 4;
}
8705
8706 /* Round up a section size to the appropriate boundary. */
8707
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding: keep the section size exactly as computed.  */
  return size;
}
8713
8714 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8715 of an rs_align_code fragment.
8716
8717 Here we fill the frag with the appropriate info for padding the
8718 output stream. The resulting frag will consist of a fixed (fr_fix)
8719 and of a repeating (fr_var) part.
8720
8721 The fixed content is always emitted before the repeating content and
8722 these two parts are used as follows in constructing the output:
8723 - the fixed part will be used to align to a valid instruction word
8724 boundary, in case that we start at a misaligned address; as no
8725 executable instruction can live at the misaligned location, we
8726 simply fill with zeros;
8727 - the variable part will be used to cover the remaining padding and
8728 we fill using the AArch64 NOP instruction.
8729
8730 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8731 enough storage space for up to 3 bytes for padding the back to a valid
8732 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8733
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag's address.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; they cannot hold
     an instruction, so fill them with zeros (the fixed part).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (variable) part is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8771
8772 /* Perform target specific initialisation of a frag.
8773 Note - despite the name this initialisation is not done when the frag
8774 is created, but only when its type is assigned. A frag can be created
8775 and used a long time before its type is set, so beware of assuming that
8776 this initialisation is performed first. */
8777
8778 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; mapping-state bookkeeping is
     only done in the OBJ_ELF variant of this function.  */
}
8784
8785 #else /* OBJ_ELF is defined. */
8786 void
aarch64_init_frag(fragS * fragP,int max_chars)8787 aarch64_init_frag (fragS * fragP, int max_chars)
8788 {
8789 /* Record a mapping symbol for alignment frags. We will delete this
8790 later if the alignment ends up empty. */
8791 if (!fragP->tc_frag_data.recorded)
8792 fragP->tc_frag_data.recorded = 1;
8793
8794 /* PR 21809: Do not set a mapping state for debug sections
8795 - it just confuses other tools. */
8796 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8797 return;
8798
8799 switch (fragP->fr_type)
8800 {
8801 case rs_align_test:
8802 case rs_fill:
8803 mapping_state_2 (MAP_DATA, max_chars);
8804 break;
8805 case rs_align:
8806 /* PR 20364: We can get alignment frags in code sections,
8807 so do not just assume that we should use the MAP_DATA state. */
8808 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8809 break;
8810 case rs_align_code:
8811 mapping_state_2 (MAP_INSN, max_chars);
8812 break;
8813 default:
8814 break;
8815 }
8816 }
8817
8818 /* Whether SFrame stack trace info is supported. */
8819
bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only, and only for
     the LP64 ABI.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8826
8827 /* Specify if RA tracking is needed. */
8828
bool
aarch64_sframe_ra_tracking_p (void)
{
  /* Always track the return address for SFrame on AArch64.  */
  return true;
}
8834
8835 /* Specify the fixed offset to recover RA from CFA.
8836 (useful only when RA tracking is not needed). */
8837
offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* No fixed RA offset applies because RA tracking is enabled (see
     aarch64_sframe_ra_tracking_p above).  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8843
8844 /* Get the abi/arch indentifier for SFrame. */
8845
8846 unsigned char
aarch64_sframe_get_abi_arch(void)8847 aarch64_sframe_get_abi_arch (void)
8848 {
8849 unsigned char sframe_abi_arch = 0;
8850
8851 if (aarch64_support_sframe_p ())
8852 {
8853 sframe_abi_arch = target_big_endian
8854 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8855 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8856 }
8857
8858 return sframe_abi_arch;
8859 }
8860
8861 #endif /* OBJ_ELF */
8862
8863 /* Initialize the DWARF-2 unwind information for this procedure. */
8864
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8870
8871 /* Convert REGNAME to a DWARF-2 register number. */
8872
8873 int
tc_aarch64_regname_to_dw2regnum(char * regname)8874 tc_aarch64_regname_to_dw2regnum (char *regname)
8875 {
8876 const reg_entry *reg = parse_reg (®name);
8877 if (reg == NULL)
8878 return -1;
8879
8880 switch (reg->type)
8881 {
8882 case REG_TYPE_SP_32:
8883 case REG_TYPE_SP_64:
8884 case REG_TYPE_R_32:
8885 case REG_TYPE_R_64:
8886 return reg->number;
8887
8888 case REG_TYPE_FP_B:
8889 case REG_TYPE_FP_H:
8890 case REG_TYPE_FP_S:
8891 case REG_TYPE_FP_D:
8892 case REG_TYPE_FP_Q:
8893 return reg->number + 64;
8894
8895 default:
8896 break;
8897 }
8898 return -1;
8899 }
8900
8901 /* Implement DWARF2_ADDR_SIZE. */
8902
8903 int
aarch64_dwarf2_addr_size(void)8904 aarch64_dwarf2_addr_size (void)
8905 {
8906 if (ilp32_p)
8907 return 4;
8908 else if (llp64_p)
8909 return 8;
8910 return bfd_arch_bits_per_address (stdoutput) / 8;
8911 }
8912
8913 /* MD interface: Symbol and relocation handling. */
8914
8915 /* Return the address within the segment that a PC-relative fixup is
8916 relative to. For AArch64 PC-relative fixups applied to instructions
8917 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8918
8919 long
md_pcrel_from_section(fixS * fixP,segT seg)8920 md_pcrel_from_section (fixS * fixP, segT seg)
8921 {
8922 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8923
8924 /* If this is pc-relative and we are going to emit a relocation
8925 then we just want to put out any pipeline compensation that the linker
8926 will need. Otherwise we want to use the calculated base. */
8927 if (fixP->fx_pcrel
8928 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8929 || aarch64_force_relocation (fixP)))
8930 base = 0;
8931
8932 /* AArch64 should be consistent for all pc-relative relocations. */
8933 return base + AARCH64_PCREL_OFFSET;
8934 }
8935
8936 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8937 Otherwise we have no need to default values of symbols. */
8938
8939 symbolS *
md_undefined_symbol(char * name ATTRIBUTE_UNUSED)8940 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8941 {
8942 #ifdef OBJ_ELF
8943 if (name[0] == '_' && name[1] == 'G'
8944 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8945 {
8946 if (!GOT_symbol)
8947 {
8948 if (symbol_find (name))
8949 as_bad (_("GOT already in the symbol table"));
8950
8951 GOT_symbol = symbol_new (name, undefined_section,
8952 &zero_address_frag, 0);
8953 }
8954
8955 return GOT_symbol;
8956 }
8957 #endif
8958
8959 return 0;
8960 }
8961
8962 /* Return non-zero if the indicated VALUE has overflowed the maximum
8963 range expressible by a unsigned number with the indicated number of
8964 BITS. */
8965
8966 static bool
unsigned_overflow(valueT value,unsigned bits)8967 unsigned_overflow (valueT value, unsigned bits)
8968 {
8969 valueT lim;
8970 if (bits >= sizeof (valueT) * 8)
8971 return false;
8972 lim = (valueT) 1 << bits;
8973 return (value >= lim);
8974 }
8975
8976
8977 /* Return non-zero if the indicated VALUE has overflowed the maximum
8978 range expressible by an signed number with the indicated number of
8979 BITS. */
8980
8981 static bool
signed_overflow(offsetT value,unsigned bits)8982 signed_overflow (offsetT value, unsigned bits)
8983 {
8984 offsetT lim;
8985 if (bits >= sizeof (offsetT) * 8)
8986 return false;
8987 lim = (offsetT) 1 << (bits - 1);
8988 return (value < -lim || value >= lim);
8989 }
8990
8991 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8992 unsigned immediate offset load/store instruction, try to encode it as
8993 an unscaled, 9-bit, signed immediate offset load/store instruction.
8994 Return TRUE if it is successful; otherwise return FALSE.
8995
8996 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8997 in response to the standard LDR/STR mnemonics when the immediate offset is
8998 unambiguous, i.e. when it is negative or unaligned. */
8999
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled, unsigned-offset load/store to its unscaled,
     signed-offset counterpart; OP_NIL marks those with no counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the signed 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
9053
9054 /* Called by fix_insn to fix a MOV immediate alias instruction.
9055
9056 Operand for a generic move immediate instruction, which is an alias
9057 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
9058 a 32-bit/64-bit immediate value into general register. An assembler error
9059 shall result if the immediate cannot be created by a single one of these
9060 instructions. If there is a choice, then to ensure reversability an
9061 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
9062
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN); tried after MOVZ to keep
	 the MOVZ-over-MOVN preference described above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of MOVZ/MOVN/ORR could produce the value in one instruction.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9114
9115 /* An instruction operand which is immediate related may have symbol used
9116 in the assembly, e.g.
9117
9118 mov w0, u32
9119 .set u32, 0x00ffff00
9120
9121 At the time when the assembly instruction is parsed, a referenced symbol,
9122 like 'u32' in the above example may not have been seen; a fixS is created
9123 in such a case and is handled here after symbols have been resolved.
9124 Instruction is fixed up with VALUE using the information in *FIXP plus
9125 extra information in FLAGS.
9126
9127 This function is called by md_apply_fix to fix up instructions that need
9128 a fix-up described above but does not involve any linker-time relocation. */
9129
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Location of the instruction being fixed up.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      /* EXCEPTION's immediate needs its own encoder; UNDEFINED's sits in
	 the low bits as-is.  */
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction with the
	 resolved value; libopcodes validates the bitmask form.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* The scaled form did not fit; for LDR/STR-style opcodes fall back
	 to the unscaled (LDUR/STUR) 9-bit signed form.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9293
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* FIXUP_F_* flags stashed in fx_addnumber when the fixup was created;
     consumed by fix_insn for the internal fixups.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place when the fixup
       is fully resolved or the target does not use RELA-style relocs
       (i.e. the addend lives in the section contents).  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-scaled pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit byte-granular pc-relative immediate.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group: SCALE selects which 16-bit chunk of the
       value the instruction materialises.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc to the ABI-specific (ILP32/LP64)
	 variant before it is emitted.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and leave the
       relocation for the linker to resolve.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9686
9687 /* Translate internal representation of relocation info to BFD target
9688 format. */
9689
9690 arelent *
tc_gen_reloc(asection * section,fixS * fixp)9691 tc_gen_reloc (asection * section, fixS * fixp)
9692 {
9693 arelent *reloc;
9694 bfd_reloc_code_real_type code;
9695
9696 reloc = XNEW (arelent);
9697
9698 reloc->sym_ptr_ptr = XNEW (asymbol *);
9699 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9700 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9701
9702 if (fixp->fx_pcrel)
9703 {
9704 if (section->use_rela_p)
9705 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9706 else
9707 fixp->fx_offset = reloc->address;
9708 }
9709 reloc->addend = fixp->fx_offset;
9710
9711 code = fixp->fx_r_type;
9712 switch (code)
9713 {
9714 case BFD_RELOC_16:
9715 if (fixp->fx_pcrel)
9716 code = BFD_RELOC_16_PCREL;
9717 break;
9718
9719 case BFD_RELOC_32:
9720 if (fixp->fx_pcrel)
9721 code = BFD_RELOC_32_PCREL;
9722 break;
9723
9724 case BFD_RELOC_64:
9725 if (fixp->fx_pcrel)
9726 code = BFD_RELOC_64_PCREL;
9727 break;
9728
9729 default:
9730 break;
9731 }
9732
9733 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9734 if (reloc->howto == NULL)
9735 {
9736 as_bad_where (fixp->fx_file, fixp->fx_line,
9737 _
9738 ("cannot represent %s relocation in this object file format"),
9739 bfd_get_reloc_code_name (code));
9740 return NULL;
9741 }
9742
9743 return reloc;
9744 }
9745
9746 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9747
9748 void
cons_fix_new_aarch64(fragS * frag,int where,int size,expressionS * exp)9749 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9750 {
9751 bfd_reloc_code_real_type type;
9752 int pcrel = 0;
9753
9754 #ifdef TE_PE
9755 if (exp->X_op == O_secrel)
9756 {
9757 exp->X_op = O_symbol;
9758 type = BFD_RELOC_32_SECREL;
9759 }
9760 else if (exp->X_op == O_secidx)
9761 {
9762 exp->X_op = O_symbol;
9763 type = BFD_RELOC_16_SECIDX;
9764 }
9765 else
9766 {
9767 #endif
9768 /* Pick a reloc.
9769 FIXME: @@ Should look at CPU word size. */
9770 switch (size)
9771 {
9772 case 1:
9773 type = BFD_RELOC_8;
9774 break;
9775 case 2:
9776 type = BFD_RELOC_16;
9777 break;
9778 case 4:
9779 type = BFD_RELOC_32;
9780 break;
9781 case 8:
9782 type = BFD_RELOC_64;
9783 break;
9784 default:
9785 as_bad (_("cannot do %u-byte relocation"), size);
9786 type = BFD_RELOC_UNUSED;
9787 break;
9788 }
9789 #ifdef TE_PE
9790 }
9791 #endif
9792
9793 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9794 }
9795
9796 /* Implement md_after_parse_args. This is the earliest time we need to decide
9797 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9798
9799 void
aarch64_after_parse_args(void)9800 aarch64_after_parse_args (void)
9801 {
9802 if (aarch64_abi != AARCH64_ABI_NONE)
9803 return;
9804
9805 #ifdef OBJ_ELF
9806 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9807 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9808 aarch64_abi = AARCH64_ABI_ILP32;
9809 else
9810 aarch64_abi = AARCH64_ABI_LP64;
9811 #else
9812 aarch64_abi = AARCH64_ABI_LLP64;
9813 #endif
9814 }
9815
9816 #ifdef OBJ_ELF
9817 const char *
elf64_aarch64_target_format(void)9818 elf64_aarch64_target_format (void)
9819 {
9820 #ifdef TE_CLOUDABI
9821 /* FIXME: What to do for ilp32_p ? */
9822 if (target_big_endian)
9823 return "elf64-bigaarch64-cloudabi";
9824 else
9825 return "elf64-littleaarch64-cloudabi";
9826 #else
9827 if (target_big_endian)
9828 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9829 else
9830 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9831 #endif
9832 }
9833
/* Target hook run on each symbol during write-out; defers to the
   generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9839 #elif defined OBJ_COFF
/* Return the BFD target name for COFF output (PE AArch64 is
   little-endian only).  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9845 #endif
9846
9847 /* MD interface: Finalization. */
9848
9849 /* A good place to do this, although this was probably not intended
9850 for this kind of use. We need to dump the literal pool before
9851 references are made to a null symbol pointer. */
9852
9853 void
aarch64_cleanup(void)9854 aarch64_cleanup (void)
9855 {
9856 literal_pool *pool;
9857
9858 for (pool = list_of_pools; pool; pool = pool->next)
9859 {
9860 /* Put it at the end of the relevant section. */
9861 subseg_set (pool->section, pool->sub_section);
9862 s_ltorg (0);
9863 }
9864 }
9865
9866 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded inside this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary; scan forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9930 #endif
9931
/* Adjust the symbol table.  For ELF, prune redundant mapping symbols
   and then apply the generic ELF adjustments; a no-op otherwise.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9944
/* Insert KEY -> VALUE into TABLE.  The final 0 argument means an
   existing entry for KEY is kept rather than replaced; the return
   value of str_hash_insert is deliberately ignored.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9950
/* Like checked_hash_insert, but for system-register tables: assert the
   name fits within AARCH64_MAX_SYSREG_NAME_LEN first.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9957
9958 static void
fill_instruction_hash_table(void)9959 fill_instruction_hash_table (void)
9960 {
9961 const aarch64_opcode *opcode = aarch64_opcode_table;
9962
9963 while (opcode->name != NULL)
9964 {
9965 templates *templ, *new_templ;
9966 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9967
9968 new_templ = XNEW (templates);
9969 new_templ->opcode = opcode;
9970 new_templ->next = NULL;
9971
9972 if (!templ)
9973 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9974 else
9975 {
9976 new_templ->next = templ->next;
9977 templ->next = new_templ;
9978 }
9979 ++opcode;
9980 }
9981 }
9982
/* Copy at most NUM characters of SRC into DST, upper-casing each one,
   and NUL-terminate DST.  DST must have room for NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t n = 0;

  while (n < num && src[n] != '\0')
    {
      dst[n] = TOUPPER (src[n]);
      n++;
    }
  dst[n] = '\0';
}
9991
9992 /* Assume STR point to a lower-case string, allocate, convert and return
9993 the corresponding upper-case string. */
9994 static inline const char*
get_upper_str(const char * str)9995 get_upper_str (const char *str)
9996 {
9997 char *ret;
9998 size_t len = strlen (str);
9999 ret = XNEWVEC (char, len + 1);
10000 convert_to_upper (ret, str, len);
10001 return ret;
10002 }
10003
10004 /* MD interface: Initialization. */
10005
/* MD interface: one-time assembler initialization.  Creates and fills
   every operand/mnemonic hash table, resolves the -mcpu/-march feature
   set, and records the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all the lookup tables up front.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields (NULL-name terminated tables).  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC/DC/AT/TLBI/SR operation names for SYS-style instructions.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV condition-flag operand names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
         the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; fall back to the built-in default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  /* DWARF register numbers used by SFrame: sp=31, fp=x29, lr=x30.  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29;	/* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10175
/* Command line processing.  */

/* Short options: -m<arg> is handled by md_parse_option.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered where the configuration supports the
   corresponding endianness.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options table for getopt-style parsing.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10202
/* Simple boolean -m<option> descriptor: matching the option name sets
   *VAR to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10211
/* Table of simple -m options; NULL-option entry terminates.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10225
/* One -mcpu=/.cpu entry: CPU name, its feature set, and the canonical
   product name used for display.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ALL_FEATURES, NULL},
  {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A76AE"},
  {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A77"},
  {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A65"},
  {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A65AE"},
  {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				       SSBS, PROFILE), "Cortex-A78"},
  {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
					 SSBS, PROFILE), "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
					PAC, PROFILE, RCPC, SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A510"},
  {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
   "Cortex-A520"},
  {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A710"},
  {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
					SVE2_BITPERM), "Cortex-A720"},
  {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
				 PROFILE), "Ares"},
  {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					SSBS), "Neoverse E1"},
  {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					PROFILE), "Neoverse N1"},
  {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
					SVE, SVE2, SVE2_BITPERM, MEMTAG,
					RNG), "Neoverse N2"},
  {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
					SSBS, RNG, F16, BFLOAT16, I8MM),
   "Neoverse V1"},
  {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
   "Broadcom Vulcan"},
  /* The 'xgene-1' spelling is an older name for 'xgene1'; it appeared
     in earlier releases and has been superseded by 'xgene1' in all
     tools.  Both are kept for backwards compatibility.  */
  {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
  {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				      SSBS, PROFILE), "Cortex-X1"},
  {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X2"},
  {"cortex-x3", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X3"},
  {"cortex-x4", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
				      SVE2_BITPERM), "Cortex-X4"},
  {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},

  {NULL, AARCH64_NO_FEATURES, NULL}
};
10317
/* One -march=/.arch entry: architecture name and its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ALL_FEATURES},
  {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
  {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
  {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
  {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
  {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
  {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
  {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
  {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
  {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
  {"armv8.9-a", AARCH64_ARCH_FEATURES (V8_9A)},
  {"armv8-r", AARCH64_ARCH_FEATURES (V8R)},
  {"armv9-a", AARCH64_ARCH_FEATURES (V9A)},
  {"armv9.1-a", AARCH64_ARCH_FEATURES (V9_1A)},
  {"armv9.2-a", AARCH64_ARCH_FEATURES (V9_2A)},
  {"armv9.3-a", AARCH64_ARCH_FEATURES (V9_3A)},
  {"armv9.4-a", AARCH64_ARCH_FEATURES (V9_4A)},
  {NULL, AARCH64_NO_FEATURES}
};
10346
/* ISA extensions.  Each entry maps a "+<name>" extension string to the
   feature bits it enables (VALUE) and the feature bits it depends on
   (REQUIRE).  The dependency relation is closed transitively by
   aarch64_feature_enable_set / aarch64_feature_disable_set.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* NULL-terminated table of the extensions accepted after -mcpu=/-march=
   and by .arch_extension.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
  {"crypto", AARCH64_FEATURES (2, AES, SHA2),
   AARCH64_FEATURE (SIMD)},
  {"fp", AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
  {"lse", AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
  {"lse128", AARCH64_FEATURE (LSE128), AARCH64_FEATURE (LSE)},
  {"simd", AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
  {"pan", AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
  {"lor", AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
  {"ras", AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
  {"rdma", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"rdm", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"fp16", AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
  {"fp16fml", AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
  {"profile", AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
  {"sve", AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
  {"tme", AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
  {"fcma", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"compnum", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"jscvt", AARCH64_FEATURE (JSCVT), AARCH64_FEATURE (FP)},
  {"rcpc", AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
  {"rcpc2", AARCH64_FEATURE (RCPC2), AARCH64_FEATURE (RCPC)},
  {"dotprod", AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
  {"sha2", AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
  {"frintts", AARCH64_FEATURE (FRINTTS), AARCH64_FEATURE (SIMD)},
  {"sb", AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
  {"predres", AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
  {"predres2", AARCH64_FEATURE (PREDRES2), AARCH64_FEATURE (PREDRES)},
  {"aes", AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
  {"sm4", AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
  {"sha3", AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
  {"rng", AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
  {"ssbs", AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
  {"memtag", AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
  {"sve2", AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
  {"sve2-sm4", AARCH64_FEATURE (SVE2_SM4),
   AARCH64_FEATURES (2, SVE2, SM4)},
  {"sve2-aes", AARCH64_FEATURE (SVE2_AES),
   AARCH64_FEATURES (2, SVE2, AES)},
  {"sve2-sha3", AARCH64_FEATURE (SVE2_SHA3),
   AARCH64_FEATURES (2, SVE2, SHA3)},
  {"sve2-bitperm", AARCH64_FEATURE (SVE2_BITPERM),
   AARCH64_FEATURE (SVE2)},
  {"sme", AARCH64_FEATURE (SME),
   AARCH64_FEATURES (2, SVE2, BFLOAT16)},
  {"sme-f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-f64f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme-i16i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme2", AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
  {"bf16", AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
  {"i8mm", AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
  {"f32mm", AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
  {"f64mm", AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
  {"ls64", AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
  {"flagm", AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
  {"flagm2", AARCH64_FEATURE (FLAGMANIP), AARCH64_FEATURE (FLAGM)},
  {"pauth", AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
  {"xs", AARCH64_FEATURE (XS), AARCH64_NO_FEATURES},
  {"wfxt", AARCH64_FEATURE (WFXT), AARCH64_NO_FEATURES},
  {"mops", AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
  {"hbc", AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
  {"cssc", AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
  {"chk", AARCH64_FEATURE (CHK), AARCH64_NO_FEATURES},
  {"gcs", AARCH64_FEATURE (GCS), AARCH64_NO_FEATURES},
  {"the", AARCH64_FEATURE (THE), AARCH64_NO_FEATURES},
  {"rasv2", AARCH64_FEATURE (RASv2), AARCH64_FEATURE (RAS)},
  {"ite", AARCH64_FEATURE (ITE), AARCH64_NO_FEATURES},
  {"d128", AARCH64_FEATURE (D128),
   AARCH64_FEATURE (LSE128)},
  {"b16b16", AARCH64_FEATURE (B16B16), AARCH64_FEATURE (SVE2)},
  {"sme2p1", AARCH64_FEATURE (SME2p1), AARCH64_FEATURE (SME2)},
  {"sve2p1", AARCH64_FEATURE (SVE2p1), AARCH64_FEATURE (SVE2)},
  {"rcpc3", AARCH64_FEATURE (RCPC3), AARCH64_FEATURE (RCPC2)},
  {NULL, AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
10434
/* A long option of the form -m<name>=<value>; the prefix is matched and
   FUNC is handed the text after the '=' (it returns non-zero on
   success).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10442
10443 /* Transitive closure of features depending on set. */
10444 static aarch64_feature_set
aarch64_feature_disable_set(aarch64_feature_set set)10445 aarch64_feature_disable_set (aarch64_feature_set set)
10446 {
10447 const struct aarch64_option_cpu_value_table *opt;
10448 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10449
10450 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10451 {
10452 prev = set;
10453 for (opt = aarch64_features; opt->name != NULL; opt++)
10454 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10455 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10456 }
10457 return set;
10458 }
10459
10460 /* Transitive closure of dependencies of set. */
10461 static aarch64_feature_set
aarch64_feature_enable_set(aarch64_feature_set set)10462 aarch64_feature_enable_set (aarch64_feature_set set)
10463 {
10464 const struct aarch64_option_cpu_value_table *opt;
10465 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10466
10467 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10468 {
10469 prev = set;
10470 for (opt = aarch64_features; opt->name != NULL; opt++)
10471 if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
10472 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10473 }
10474 return set;
10475 }
10476
10477 static int
aarch64_parse_features(const char * str,const aarch64_feature_set ** opt_p,bool ext_only)10478 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10479 bool ext_only)
10480 {
10481 /* We insist on extensions being added before being removed. We achieve
10482 this by using the ADDING_VALUE variable to indicate whether we are
10483 adding an extension (1) or removing it (0) and only allowing it to
10484 change in the order -1 -> 1 -> 0. */
10485 int adding_value = -1;
10486 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10487
10488 /* Copy the feature set, so that we can modify it. */
10489 *ext_set = **opt_p;
10490 *opt_p = ext_set;
10491
10492 while (str != NULL && *str != 0)
10493 {
10494 const struct aarch64_option_cpu_value_table *opt;
10495 const char *ext = NULL;
10496 int optlen;
10497
10498 if (!ext_only)
10499 {
10500 if (*str != '+')
10501 {
10502 as_bad (_("invalid architectural extension"));
10503 return 0;
10504 }
10505
10506 ext = strchr (++str, '+');
10507 }
10508
10509 if (ext != NULL)
10510 optlen = ext - str;
10511 else
10512 optlen = strlen (str);
10513
10514 if (optlen >= 2 && startswith (str, "no"))
10515 {
10516 if (adding_value != 0)
10517 adding_value = 0;
10518 optlen -= 2;
10519 str += 2;
10520 }
10521 else if (optlen > 0)
10522 {
10523 if (adding_value == -1)
10524 adding_value = 1;
10525 else if (adding_value != 1)
10526 {
10527 as_bad (_("must specify extensions to add before specifying "
10528 "those to remove"));
10529 return false;
10530 }
10531 }
10532
10533 if (optlen == 0)
10534 {
10535 as_bad (_("missing architectural extension"));
10536 return 0;
10537 }
10538
10539 gas_assert (adding_value != -1);
10540
10541 for (opt = aarch64_features; opt->name != NULL; opt++)
10542 if (optlen == (int) strlen(opt->name)
10543 && strncmp (opt->name, str, optlen) == 0)
10544 {
10545 aarch64_feature_set set;
10546
10547 /* Add or remove the extension. */
10548 if (adding_value)
10549 {
10550 set = aarch64_feature_enable_set (opt->value);
10551 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10552 }
10553 else
10554 {
10555 set = aarch64_feature_disable_set (opt->value);
10556 AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
10557 }
10558 break;
10559 }
10560
10561 if (opt->name == NULL)
10562 {
10563 as_bad (_("unknown architectural extension `%s'"), str);
10564 return 0;
10565 }
10566
10567 str = ext;
10568 };
10569
10570 return 1;
10571 }
10572
10573 static int
aarch64_parse_cpu(const char * str)10574 aarch64_parse_cpu (const char *str)
10575 {
10576 const struct aarch64_cpu_option_table *opt;
10577 const char *ext = strchr (str, '+');
10578 size_t optlen;
10579
10580 if (ext != NULL)
10581 optlen = ext - str;
10582 else
10583 optlen = strlen (str);
10584
10585 if (optlen == 0)
10586 {
10587 as_bad (_("missing cpu name `%s'"), str);
10588 return 0;
10589 }
10590
10591 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10592 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10593 {
10594 mcpu_cpu_opt = &opt->value;
10595 if (ext != NULL)
10596 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10597
10598 return 1;
10599 }
10600
10601 as_bad (_("unknown cpu `%s'"), str);
10602 return 0;
10603 }
10604
10605 static int
aarch64_parse_arch(const char * str)10606 aarch64_parse_arch (const char *str)
10607 {
10608 const struct aarch64_arch_option_table *opt;
10609 const char *ext = strchr (str, '+');
10610 size_t optlen;
10611
10612 if (ext != NULL)
10613 optlen = ext - str;
10614 else
10615 optlen = strlen (str);
10616
10617 if (optlen == 0)
10618 {
10619 as_bad (_("missing architecture name `%s'"), str);
10620 return 0;
10621 }
10622
10623 for (opt = aarch64_archs; opt->name != NULL; opt++)
10624 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10625 {
10626 march_cpu_opt = &opt->value;
10627 if (ext != NULL)
10628 return aarch64_parse_features (ext, &march_cpu_opt, false);
10629
10630 return 1;
10631 }
10632
10633 as_bad (_("unknown architecture `%s'\n"), str);
10634 return 0;
10635 }
10636
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};

/* ABIs selectable with -mabi=.  ILP32/LP64 are available only for ELF
   targets; LLP64 only for non-ELF targets.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10652
10653 static int
aarch64_parse_abi(const char * str)10654 aarch64_parse_abi (const char *str)
10655 {
10656 unsigned int i;
10657
10658 if (str[0] == '\0')
10659 {
10660 as_bad (_("missing abi name `%s'"), str);
10661 return 0;
10662 }
10663
10664 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10665 if (strcmp (str, aarch64_abis[i].name) == 0)
10666 {
10667 aarch64_abi = aarch64_abis[i].value;
10668 return 1;
10669 }
10670
10671 as_bad (_("unknown abi `%s'\n"), str);
10672 return 0;
10673 }
10674
/* -m<name>=<value> options; the prefix is matched in md_parse_option and
   the remainder handed to the entry's parse function.  NULL-terminated.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10684
/* Handle one target-specific command-line option.  C is the option
   character (or long-option code) and ARG its argument, if any.
   Return 1 if the option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple boolean -m options...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the prefix-matched -m<name>=<value> options.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      /* Not an AArch64 option.  */
      return 0;
    }

  return 1;
}
10751
10752 void
md_show_usage(FILE * fp)10753 md_show_usage (FILE * fp)
10754 {
10755 struct aarch64_option_table *opt;
10756 struct aarch64_long_option_table *lopt;
10757
10758 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10759
10760 for (opt = aarch64_opts; opt->option != NULL; opt++)
10761 if (opt->help != NULL)
10762 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10763
10764 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10765 if (lopt->help != NULL)
10766 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10767
10768 #ifdef OPTION_EB
10769 fprintf (fp, _("\
10770 -EB assemble code for a big-endian cpu\n"));
10771 #endif
10772
10773 #ifdef OPTION_EL
10774 fprintf (fp, _("\
10775 -EL assemble code for a little-endian cpu\n"));
10776 #endif
10777 }
10778
10779 /* Parse a .cpu directive. */
10780
10781 static void
s_aarch64_cpu(int ignored ATTRIBUTE_UNUSED)10782 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10783 {
10784 const struct aarch64_cpu_option_table *opt;
10785 char saved_char;
10786 char *name;
10787 char *ext;
10788 size_t optlen;
10789
10790 name = input_line_pointer;
10791 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10792 saved_char = *input_line_pointer;
10793 *input_line_pointer = 0;
10794
10795 ext = strchr (name, '+');
10796
10797 if (ext != NULL)
10798 optlen = ext - name;
10799 else
10800 optlen = strlen (name);
10801
10802 /* Skip the first "all" entry. */
10803 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10804 if (strlen (opt->name) == optlen
10805 && strncmp (name, opt->name, optlen) == 0)
10806 {
10807 mcpu_cpu_opt = &opt->value;
10808 if (ext != NULL)
10809 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10810 return;
10811
10812 cpu_variant = *mcpu_cpu_opt;
10813
10814 *input_line_pointer = saved_char;
10815 demand_empty_rest_of_line ();
10816 return;
10817 }
10818 as_bad (_("unknown cpu `%s'"), name);
10819 *input_line_pointer = saved_char;
10820 ignore_rest_of_line ();
10821 }
10822
10823
10824 /* Parse a .arch directive. */
10825
10826 static void
s_aarch64_arch(int ignored ATTRIBUTE_UNUSED)10827 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10828 {
10829 const struct aarch64_arch_option_table *opt;
10830 char saved_char;
10831 char *name;
10832 char *ext;
10833 size_t optlen;
10834
10835 name = input_line_pointer;
10836 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10837 saved_char = *input_line_pointer;
10838 *input_line_pointer = 0;
10839
10840 ext = strchr (name, '+');
10841
10842 if (ext != NULL)
10843 optlen = ext - name;
10844 else
10845 optlen = strlen (name);
10846
10847 /* Skip the first "all" entry. */
10848 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10849 if (strlen (opt->name) == optlen
10850 && strncmp (name, opt->name, optlen) == 0)
10851 {
10852 mcpu_cpu_opt = &opt->value;
10853 if (ext != NULL)
10854 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10855 return;
10856
10857 cpu_variant = *mcpu_cpu_opt;
10858
10859 *input_line_pointer = saved_char;
10860 demand_empty_rest_of_line ();
10861 return;
10862 }
10863
10864 as_bad (_("unknown architecture `%s'\n"), name);
10865 *input_line_pointer = saved_char;
10866 ignore_rest_of_line ();
10867 }
10868
10869 /* Parse a .arch_extension directive. */
10870
10871 static void
s_aarch64_arch_extension(int ignored ATTRIBUTE_UNUSED)10872 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10873 {
10874 char saved_char;
10875 char *ext = input_line_pointer;
10876
10877 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10878 saved_char = *input_line_pointer;
10879 *input_line_pointer = 0;
10880
10881 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10882 return;
10883
10884 cpu_variant = *mcpu_cpu_opt;
10885
10886 *input_line_pointer = saved_char;
10887 demand_empty_rest_of_line ();
10888 }
10889
/* Copy symbol information.  */

/* Propagate the AArch64-specific symbol flag word from SRC to DEST
   (used when one symbol is made an alias of another).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10897
10898 #ifdef OBJ_ELF
10899 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10900 This is needed so AArch64 specific st_other values can be independently
10901 specified for an IFUNC resolver (that is called by the dynamic linker)
10902 and the symbol it resolves (aliased to the resolver). In particular,
10903 if a function symbol has special st_other value set via directives,
10904 then attaching an IFUNC resolver to that symbol should not override
10905 the st_other setting. Requiring the directive on the IFUNC resolver
10906 symbol would be unexpected and problematic in C code, where the two
10907 symbols appear as two independent function declarations. */
10908
10909 void
aarch64_elf_copy_symbol_attributes(symbolS * dest,symbolS * src)10910 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10911 {
10912 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10913 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10914 /* If size is unset, copy size from src. Because we don't track whether
10915 .size has been used, we can't differentiate .size dest, 0 from the case
10916 where dest's size is unset. */
10917 if (!destelf->size && S_GET_SIZE (dest) == 0)
10918 {
10919 if (srcelf->size)
10920 {
10921 destelf->size = XNEW (expressionS);
10922 *destelf->size = *srcelf->size;
10923 }
10924 S_SET_SIZE (dest, S_GET_SIZE (src));
10925 }
10926 }
10927 #endif
10928