1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
119 enum data_pattern
120 {
121 DP_UNKNOWN,
122 DP_VECTOR_3SAME,
123 DP_VECTOR_LONG,
124 DP_VECTOR_WIDE,
125 DP_VECTOR_ACROSS_LANES,
126 };
127
128 static const char significant_operand_index [] =
129 {
130 0, /* DP_UNKNOWN, by default using operand 0. */
131 0, /* DP_VECTOR_3SAME */
132 1, /* DP_VECTOR_LONG */
133 2, /* DP_VECTOR_WIDE */
134 1, /* DP_VECTOR_ACROSS_LANES */
135 };
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that doesn't call
188 get_data_pattern each time we need to select an operand. We can
189 either buffer the calculated result or statically generate the data;
190 however, it is not obvious that the optimization will bring significant
191 benefit. */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199
200 /* Instruction bit-fields.
201 + Keep synced with 'enum aarch64_field_kind'. */
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 5, 4 }, /* imm4_5: in SME instructions. */
248 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
249 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
250 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
251 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
252 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
253 { 5, 14 }, /* imm14: in test bit and branch instructions. */
254 { 5, 16 }, /* imm16: in exception instructions. */
255 { 0, 16 }, /* imm16_2: in udf instruction. */
256 { 0, 26 }, /* imm26: in unconditional branch instructions. */
257 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
258 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
259 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
260 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
261 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
262 { 22, 1 }, /* N: in logical (immediate) instructions. */
263 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
264 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
265 { 31, 1 }, /* sf: in integer data processing instructions. */
266 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
267 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
268 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
269 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
270 { 31, 1 }, /* b5: in the test bit and branch instructions. */
271 { 19, 5 }, /* b40: in the test bit and branch instructions. */
272 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
273 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
274 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
275 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
276 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
277 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
278 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
279 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
280 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
281 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
282 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
283 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
284 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
285 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
286 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
287 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
288 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
290 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
292 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
293 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
294 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
295 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
296 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
297 { 5, 1 }, /* SVE_i1: single-bit immediate. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
300 { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
301 { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20. */
302 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
303 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
304 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
305 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
306 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
307 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
308 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
309 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
310 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
311 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
312 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
313 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
314 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
315 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
316 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
317 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
320 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
321 { 16, 4 }, /* SVE_tsz: triangular size select. */
322 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
323 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
324 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
325 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
326 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
327 { 0, 2 }, /* SME ZAda tile ZA0-ZA3. */
328 { 0, 3 }, /* SME ZAda tile ZA0-ZA7. */
329 { 22, 2 }, /* SME_size_10: size<1>, size<0> class field, [23:22]. */
330 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
331 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
332 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
333 { 13, 3 }, /* SME Pm second source scalable predicate register P0-P7. */
334 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
335 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
336 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
337 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
338 { 18, 3 }, /* SME_tshl: immediate and qualifier field, bits [20:18]. */
339 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
340 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
341 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
342 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
343 { 22, 1 }, /* sz: 1-bit element size select. */
344 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
345 };
346
347 enum aarch64_operand_class
348 aarch64_get_operand_class (enum aarch64_opnd type)
349 {
350 return aarch64_operands[type].op_class;
351 }
352
353 const char *
354 aarch64_get_operand_name (enum aarch64_opnd type)
355 {
356 return aarch64_operands[type].name;
357 }
358
359 /* Get operand description string.
360 This is usually for diagnostic purposes. */
361 const char *
362 aarch64_get_operand_desc (enum aarch64_opnd type)
363 {
364 return aarch64_operands[type].desc;
365 }
366
367 /* Table of all conditional affixes. */
368 const aarch64_cond aarch64_conds[16] =
369 {
370 {{"eq", "none"}, 0x0},
371 {{"ne", "any"}, 0x1},
372 {{"cs", "hs", "nlast"}, 0x2},
373 {{"cc", "lo", "ul", "last"}, 0x3},
374 {{"mi", "first"}, 0x4},
375 {{"pl", "nfrst"}, 0x5},
376 {{"vs"}, 0x6},
377 {{"vc"}, 0x7},
378 {{"hi", "pmore"}, 0x8},
379 {{"ls", "plast"}, 0x9},
380 {{"ge", "tcont"}, 0xa},
381 {{"lt", "tstop"}, 0xb},
382 {{"gt"}, 0xc},
383 {{"le"}, 0xd},
384 {{"al"}, 0xe},
385 {{"nv"}, 0xf},
386 };
387
388 const aarch64_cond *
389 get_cond_from_value (aarch64_insn value)
390 {
391 assert (value < 16);
392 return &aarch64_conds[(unsigned int) value];
393 }
394
395 const aarch64_cond *
396 get_inverted_cond (const aarch64_cond *cond)
397 {
398 return &aarch64_conds[cond->value ^ 0x1];
399 }
400
401 /* Table describing the operand extension/shifting operators; indexed by
402 enum aarch64_modifier_kind.
403
404 The value column provides the most common values for encoding modifiers,
405 which enables table-driven encoding/decoding for the modifiers. */
406 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
407 {
408 {"none", 0x0},
409 {"msl", 0x0},
410 {"ror", 0x3},
411 {"asr", 0x2},
412 {"lsr", 0x1},
413 {"lsl", 0x0},
414 {"uxtb", 0x0},
415 {"uxth", 0x1},
416 {"uxtw", 0x2},
417 {"uxtx", 0x3},
418 {"sxtb", 0x4},
419 {"sxth", 0x5},
420 {"sxtw", 0x6},
421 {"sxtx", 0x7},
422 {"mul", 0x0},
423 {"mul vl", 0x0},
424 {NULL, 0},
425 };
426
427 enum aarch64_modifier_kind
428 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
429 {
430 return desc - aarch64_operand_modifiers;
431 }
432
433 aarch64_insn
434 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
435 {
436 return aarch64_operand_modifiers[kind].value;
437 }
438
439 enum aarch64_modifier_kind
440 aarch64_get_operand_modifier_from_value (aarch64_insn value,
441 bool extend_p)
442 {
443 if (extend_p)
444 return AARCH64_MOD_UXTB + value;
445 else
446 return AARCH64_MOD_LSL - value;
447 }
448
449 bool
450 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
451 {
452 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
453 }
454
455 static inline bool
456 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
457 {
458 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
459 }
460
461 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
462 {
463 { "#0x00", 0x0 },
464 { "oshld", 0x1 },
465 { "oshst", 0x2 },
466 { "osh", 0x3 },
467 { "#0x04", 0x4 },
468 { "nshld", 0x5 },
469 { "nshst", 0x6 },
470 { "nsh", 0x7 },
471 { "#0x08", 0x8 },
472 { "ishld", 0x9 },
473 { "ishst", 0xa },
474 { "ish", 0xb },
475 { "#0x0c", 0xc },
476 { "ld", 0xd },
477 { "st", 0xe },
478 { "sy", 0xf },
479 };
480
481 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
482 { /* CRm<3:2> #imm */
483 { "oshnxs", 16 }, /* 00 16 */
484 { "nshnxs", 20 }, /* 01 20 */
485 { "ishnxs", 24 }, /* 10 24 */
486 { "synxs", 28 }, /* 11 28 */
487 };
488
489 /* Table describing the operands supported by the aliases of the HINT
490 instruction.
491
492 The name column is the operand that is accepted for the alias. The value
493 column is the hint number of the alias. The list of operands is terminated
494 by NULL in the name column. */
495
496 const struct aarch64_name_value_pair aarch64_hint_options[] =
497 {
498 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
499 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
500 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
501 { "c", HINT_OPD_C }, /* BTI C. */
502 { "j", HINT_OPD_J }, /* BTI J. */
503 { "jc", HINT_OPD_JC }, /* BTI JC. */
504 { NULL, HINT_OPD_NULL },
505 };
506
507 /* op -> op: load = 0 instruction = 1 store = 2
508 l -> level: 1-3
509 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
510 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
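/* For instance (worked through from the macro above): B (0, 1, 0) is 0x00
   ("pldl1keep") and B (2, 3, 1) is (2 << 3) | (2 << 1) | 1 = 0x15
   ("pstl3strm"), matching the table below.  */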
511 const struct aarch64_name_value_pair aarch64_prfops[32] =
512 {
513 { "pldl1keep", B(0, 1, 0) },
514 { "pldl1strm", B(0, 1, 1) },
515 { "pldl2keep", B(0, 2, 0) },
516 { "pldl2strm", B(0, 2, 1) },
517 { "pldl3keep", B(0, 3, 0) },
518 { "pldl3strm", B(0, 3, 1) },
519 { NULL, 0x06 },
520 { NULL, 0x07 },
521 { "plil1keep", B(1, 1, 0) },
522 { "plil1strm", B(1, 1, 1) },
523 { "plil2keep", B(1, 2, 0) },
524 { "plil2strm", B(1, 2, 1) },
525 { "plil3keep", B(1, 3, 0) },
526 { "plil3strm", B(1, 3, 1) },
527 { NULL, 0x0e },
528 { NULL, 0x0f },
529 { "pstl1keep", B(2, 1, 0) },
530 { "pstl1strm", B(2, 1, 1) },
531 { "pstl2keep", B(2, 2, 0) },
532 { "pstl2strm", B(2, 2, 1) },
533 { "pstl3keep", B(2, 3, 0) },
534 { "pstl3strm", B(2, 3, 1) },
535 { NULL, 0x16 },
536 { NULL, 0x17 },
537 { NULL, 0x18 },
538 { NULL, 0x19 },
539 { NULL, 0x1a },
540 { NULL, 0x1b },
541 { NULL, 0x1c },
542 { NULL, 0x1d },
543 { NULL, 0x1e },
544 { NULL, 0x1f },
545 };
546 #undef B
547
548 /* Utilities on value constraint. */
549
550 static inline int
551 value_in_range_p (int64_t value, int low, int high)
552 {
553 return (value >= low && value <= high) ? 1 : 0;
554 }
555
556 /* Return true if VALUE is a multiple of ALIGN. */
557 static inline int
558 value_aligned_p (int64_t value, int align)
559 {
560 return (value % align) == 0;
561 }
562
563 /* Return non-zero if signed VALUE fits in a WIDTH-bit field. */
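/* For example, a 4-bit signed field accepts values in [-8, 7], so
   value_fit_signed_field_p (-5, 4) is 1 and value_fit_signed_field_p (8, 4)
   is 0.  */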
564 static inline int
565 value_fit_signed_field_p (int64_t value, unsigned width)
566 {
567 assert (width < 32);
568 if (width < sizeof (value) * 8)
569 {
570 int64_t lim = (uint64_t) 1 << (width - 1);
571 if (value >= -lim && value < lim)
572 return 1;
573 }
574 return 0;
575 }
576
577 /* Return non-zero if unsigned VALUE fits in a WIDTH-bit field. */
578 static inline int
579 value_fit_unsigned_field_p (int64_t value, unsigned width)
580 {
581 assert (width < 32);
582 if (width < sizeof (value) * 8)
583 {
584 int64_t lim = (uint64_t) 1 << width;
585 if (value >= 0 && value < lim)
586 return 1;
587 }
588 return 0;
589 }
590
591 /* Return 1 if OPERAND is SP or WSP. */
592 int
593 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
594 {
595 return ((aarch64_get_operand_class (operand->type)
596 == AARCH64_OPND_CLASS_INT_REG)
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
598 && operand->reg.regno == 31);
599 }
600
601 /* Return 1 if OPERAND is XZR or WZR. */
602 int
603 aarch64_zero_register_p (const aarch64_opnd_info *operand)
604 {
605 return ((aarch64_get_operand_class (operand->type)
606 == AARCH64_OPND_CLASS_INT_REG)
607 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
608 && operand->reg.regno == 31);
609 }
610
611 /* Return non-zero if the operand *OPERAND, which has the operand code
612 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
613 qualified by the qualifier TARGET. */
614
615 static inline int
616 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
617 aarch64_opnd_qualifier_t target)
618 {
619 switch (operand->qualifier)
620 {
621 case AARCH64_OPND_QLF_W:
622 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
623 return 1;
624 break;
625 case AARCH64_OPND_QLF_X:
626 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
627 return 1;
628 break;
629 case AARCH64_OPND_QLF_WSP:
630 if (target == AARCH64_OPND_QLF_W
631 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
632 return 1;
633 break;
634 case AARCH64_OPND_QLF_SP:
635 if (target == AARCH64_OPND_QLF_X
636 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
637 return 1;
638 break;
639 default:
640 break;
641 }
642
643 return 0;
644 }
645
646 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
647 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
648
649 Return NIL if more than one expected qualifier is found. */
650
651 aarch64_opnd_qualifier_t
652 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
653 int idx,
654 const aarch64_opnd_qualifier_t known_qlf,
655 int known_idx)
656 {
657 int i, saved_i;
658
659 /* Special case.
660
661 When the known qualifier is NIL, we have to assume that there is only
662 one qualifier sequence in the *QSEQ_LIST and return the corresponding
663 qualifier directly. One scenario is that for instruction
664 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
665 which has only one possible valid qualifier sequence
666 NIL, S_D
667 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
668 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
669
670 Because the qualifier NIL has dual roles in the qualifier sequence:
671 it can mean no qualifier for the operand, or the qualifier sequence is
672 not in use (when all qualifiers in the sequence are NILs), we have to
673 handle this special case here. */
674 if (known_qlf == AARCH64_OPND_NIL)
675 {
676 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
677 return qseq_list[0][idx];
678 }
679
680 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
681 {
682 if (qseq_list[i][known_idx] == known_qlf)
683 {
684 if (saved_i != -1)
685 /* More than one sequence is found to have KNOWN_QLF at
686 KNOWN_IDX. */
687 return AARCH64_OPND_NIL;
688 saved_i = i;
689 }
690 }
691
692 return qseq_list[saved_i][idx];
693 }
694
695 enum operand_qualifier_kind
696 {
697 OQK_NIL,
698 OQK_OPD_VARIANT,
699 OQK_VALUE_IN_RANGE,
700 OQK_MISC,
701 };
702
703 /* Operand qualifier description. */
704 struct operand_qualifier_data
705 {
706 /* The usage of the three data fields depends on the qualifier kind. */
707 int data0;
708 int data1;
709 int data2;
710 /* Description. */
711 const char *desc;
712 /* Kind. */
713 enum operand_qualifier_kind kind;
714 };
715
716 /* Indexed by the operand qualifier enumerators. */
717 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
718 {
719 {0, 0, 0, "NIL", OQK_NIL},
720
721 /* Operand variant qualifiers.
722 First 3 fields:
723 element size, number of elements and common value for encoding. */
724
725 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
726 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
727 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
728 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
729
730 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
731 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
732 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
733 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
734 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
735 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
736 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
737
738 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
739 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
740 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
741 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
742 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
743 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
744 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
745 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
746 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
747 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
748 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
749
750 {0, 0, 0, "z", OQK_OPD_VARIANT},
751 {0, 0, 0, "m", OQK_OPD_VARIANT},
752
753 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
754 {16, 0, 0, "tag", OQK_OPD_VARIANT},
755
756 /* Qualifiers constraining the value range.
757 First 3 fields:
758 Lower bound, upper bound, unused. */
759
760 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
761 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
762 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
763 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
764 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
765 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
766 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
767
768 /* Qualifiers for miscellaneous purpose.
769 First 3 fields:
770 unused, unused and unused. */
771
772 {0, 0, 0, "lsl", 0},
773 {0, 0, 0, "msl", 0},
774
775 {0, 0, 0, "retrieving", 0},
776 };
777
778 static inline bool
779 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
780 {
781 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
782 }
783
784 static inline bool
785 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
786 {
787 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
788 }
789
790 const char*
791 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
792 {
793 return aarch64_opnd_qualifiers[qualifier].desc;
794 }
795
796 /* Given an operand qualifier, return the expected data element size
797 of a qualified operand. */
798 unsigned char
799 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
800 {
801 assert (operand_variant_qualifier_p (qualifier));
802 return aarch64_opnd_qualifiers[qualifier].data0;
803 }
804
805 unsigned char
806 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
807 {
808 assert (operand_variant_qualifier_p (qualifier));
809 return aarch64_opnd_qualifiers[qualifier].data1;
810 }
811
812 aarch64_insn
813 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
814 {
815 assert (operand_variant_qualifier_p (qualifier));
816 return aarch64_opnd_qualifiers[qualifier].data2;
817 }
818
819 static int
820 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
821 {
822 assert (qualifier_value_in_range_constraint_p (qualifier));
823 return aarch64_opnd_qualifiers[qualifier].data0;
824 }
825
826 static int
827 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
828 {
829 assert (qualifier_value_in_range_constraint_p (qualifier));
830 return aarch64_opnd_qualifiers[qualifier].data1;
831 }
832
833 #ifdef DEBUG_AARCH64
834 void
835 aarch64_verbose (const char *str, ...)
836 {
837 va_list ap;
838 va_start (ap, str);
839 printf ("#### ");
840 vprintf (str, ap);
841 printf ("\n");
842 va_end (ap);
843 }
844
845 static inline void
846 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
847 {
848 int i;
849 printf ("#### \t");
850 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
851 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
852 printf ("\n");
853 }
854
855 static void
856 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
857 const aarch64_opnd_qualifier_t *qualifier)
858 {
859 int i;
860 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
861
862 aarch64_verbose ("dump_match_qualifiers:");
863 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
864 curr[i] = opnd[i].qualifier;
865 dump_qualifier_sequence (curr);
866 aarch64_verbose ("against");
867 dump_qualifier_sequence (qualifier);
868 }
869 #endif /* DEBUG_AARCH64 */
870
871 /* This function checks if the given opcode OPCODE is a destructive
872 instruction based on the usage of the registers. It does not recognize
873 unary destructive instructions. */
874 bool
875 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
876 {
877 int i = 0;
878 const enum aarch64_opnd *opnds = opcode->operands;
879
880 if (opnds[0] == AARCH64_OPND_NIL)
881 return false;
882
883 while (opnds[++i] != AARCH64_OPND_NIL)
884 if (opnds[i] == opnds[0])
885 return true;
886
887 return false;
888 }
889
890 /* TODO: improve this; we could have an extra field at run time to
891 store the number of operands rather than calculating it every time. */
892
893 int
894 aarch64_num_of_operands (const aarch64_opcode *opcode)
895 {
896 int i = 0;
897 const enum aarch64_opnd *opnds = opcode->operands;
898 while (opnds[i++] != AARCH64_OPND_NIL)
899 ;
900 --i;
901 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
902 return i;
903 }
904
905 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
906 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
907
908 N.B. on entry, it is very likely that only some operands in *INST
909 have had their qualifiers established.
910
911 If STOP_AT is not -1, the function will only try to match
912 the qualifier sequence for operands before and including the operand
913 of index STOP_AT; and on success *RET will only be filled with the first
914 (STOP_AT+1) qualifiers.
915
916 A couple of examples of the matching algorithm:
917
918 X,W,NIL should match
919 X,W,NIL
920
921 NIL,NIL should match
922 X ,NIL
923
924 Apart from serving the main encoding routine, this can also be called
925 during or after the operand decoding. */
926
927 int
928 aarch64_find_best_match (const aarch64_inst *inst,
929 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
930 int stop_at, aarch64_opnd_qualifier_t *ret)
931 {
932 int found = 0;
933 int i, num_opnds;
934 const aarch64_opnd_qualifier_t *qualifiers;
935
936 num_opnds = aarch64_num_of_operands (inst->opcode);
937 if (num_opnds == 0)
938 {
939 DEBUG_TRACE ("SUCCEED: no operand");
940 return 1;
941 }
942
943 if (stop_at < 0 || stop_at >= num_opnds)
944 stop_at = num_opnds - 1;
945
946 /* For each pattern. */
947 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
948 {
949 int j;
950 qualifiers = *qualifiers_list;
951
952 /* Start as positive. */
953 found = 1;
954
955 DEBUG_TRACE ("%d", i);
956 #ifdef DEBUG_AARCH64
957 if (debug_dump)
958 dump_match_qualifiers (inst->operands, qualifiers);
959 #endif
960
961 /* Most opcodes have far fewer patterns in the list.
962 The first NIL qualifier indicates the end of the list. */
963 if (empty_qualifier_sequence_p (qualifiers))
964 {
965 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
966 if (i)
967 found = 0;
968 break;
969 }
970
971 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
972 {
973 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
974 {
975 /* Either the operand does not have qualifier, or the qualifier
976 for the operand needs to be deduced from the qualifier
977 sequence.
978 In the latter case, any constraint checking related with
979 the obtained qualifier should be done later in
980 operand_general_constraint_met_p. */
981 continue;
982 }
983 else if (*qualifiers != inst->operands[j].qualifier)
984 {
985 /* Unless the target qualifier can also qualify the operand
986 (which has already had a non-nil qualifier), non-equal
987 qualifiers are generally un-matched. */
988 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
989 continue;
990 else
991 {
992 found = 0;
993 break;
994 }
995 }
996 else
997 continue; /* Equal qualifiers are certainly matched. */
998 }
999
1000 /* Qualifiers established. */
1001 if (found == 1)
1002 break;
1003 }
1004
1005 if (found == 1)
1006 {
1007 /* Fill the result in *RET. */
1008 int j;
1009 qualifiers = *qualifiers_list;
1010
1011 DEBUG_TRACE ("complete qualifiers using list %d", i);
1012 #ifdef DEBUG_AARCH64
1013 if (debug_dump)
1014 dump_qualifier_sequence (qualifiers);
1015 #endif
1016
1017 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1018 ret[j] = *qualifiers;
1019 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1020 ret[j] = AARCH64_OPND_QLF_NIL;
1021
1022 DEBUG_TRACE ("SUCCESS");
1023 return 1;
1024 }
1025
1026 DEBUG_TRACE ("FAIL");
1027 return 0;
1028 }
1029
1030 /* Operand qualifier matching and resolving.
1031
1032 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1033 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1034
1035 If UPDATE_P, update the qualifier(s) in *INST after the matching
1036 succeeds. */
1037
1038 static int
1039 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1040 {
1041 int i, nops;
1042 aarch64_opnd_qualifier_seq_t qualifiers;
1043
1044 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1045 qualifiers))
1046 {
1047 DEBUG_TRACE ("matching FAIL");
1048 return 0;
1049 }
1050
1051 if (inst->opcode->flags & F_STRICT)
1052 {
1053 /* Require an exact qualifier match, even for NIL qualifiers. */
1054 nops = aarch64_num_of_operands (inst->opcode);
1055 for (i = 0; i < nops; ++i)
1056 if (inst->operands[i].qualifier != qualifiers[i])
1057 return false;
1058 }
1059
1060 /* Update the qualifiers. */
1061 if (update_p)
1062 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1063 {
1064 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1065 break;
1066 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1067 "update %s with %s for operand %d",
1068 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1069 aarch64_get_qualifier_name (qualifiers[i]), i);
1070 inst->operands[i].qualifier = qualifiers[i];
1071 }
1072
1073 DEBUG_TRACE ("matching SUCCESS");
1074 return 1;
1075 }
1076
1077 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1078 register by MOVZ.
1079
1080 IS32 indicates whether value is a 32-bit immediate or not.
1081 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1082 amount will be returned in *SHIFT_AMOUNT. */
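/* For example, 0x12340000 is accepted (a MOVZ with shift 16), whereas
   0x12345678 is rejected because it spans more than one 16-bit chunk.  */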
1083
1084 bool
1085 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1086 {
1087 int amount;
1088
1089 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1090
1091 if (is32)
1092 {
1093 /* Allow all zeros or all ones in top 32-bits, so that
1094 32-bit constant expressions like ~0x80000000 are
1095 permitted. */
1096 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1097 /* Immediate out of range. */
1098 return false;
1099 value &= 0xffffffff;
1100 }
1101
1102 /* first, try movz then movn */
1103 amount = -1;
1104 if ((value & ((uint64_t) 0xffff << 0)) == value)
1105 amount = 0;
1106 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1107 amount = 16;
1108 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1109 amount = 32;
1110 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1111 amount = 48;
1112
1113 if (amount == -1)
1114 {
1115 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1116 return false;
1117 }
1118
1119 if (shift_amount != NULL)
1120 *shift_amount = amount;
1121
1122 DEBUG_TRACE ("exit true with amount %d", amount);
1123
1124 return true;
1125 }
1126
1127 /* Build the accepted values for immediate logical SIMD instructions.
1128
1129 The standard encodings of the immediate value are:
1130 N imms immr SIMD size R S
1131 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1132 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1133 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1134 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1135 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1136 0 11110s 00000r 2 UInt(r) UInt(s)
1137 where all-ones value of S is reserved.
1138
1139 Let's call E the SIMD size.
1140
1141 The immediate value is: S+1 bits '1' rotated to the right by R.
1142
1143 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1144 (remember S != E - 1). */
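/* In full: 64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334.
   As a concrete example, 0x00ff00ff00ff00ff is a valid bitmask immediate:
   eight consecutive ones (S = 7) in a 16-bit element, not rotated (R = 0),
   replicated to 64 bits.  */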
1145
1146 #define TOTAL_IMM_NB 5334
1147
1148 typedef struct
1149 {
1150 uint64_t imm;
1151 aarch64_insn encoding;
1152 } simd_imm_encoding;
1153
1154 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1155
1156 static int
1157 simd_imm_encoding_cmp(const void *i1, const void *i2)
1158 {
1159 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1160 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1161
1162 if (imm1->imm < imm2->imm)
1163 return -1;
1164 if (imm1->imm > imm2->imm)
1165 return +1;
1166 return 0;
1167 }
1168
1169 /* immediate bitfield standard encoding
1170 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1171 1 ssssss rrrrrr 64 rrrrrr ssssss
1172 0 0sssss 0rrrrr 32 rrrrr sssss
1173 0 10ssss 00rrrr 16 rrrr ssss
1174 0 110sss 000rrr 8 rrr sss
1175 0 1110ss 0000rr 4 rr ss
1176 0 11110s 00000r 2 r s */
1177 static inline int
1178 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1179 {
1180 return (is64 << 12) | (r << 6) | s;
1181 }
1182
1183 static void
1184 build_immediate_table (void)
1185 {
1186 uint32_t log_e, e, s, r, s_mask;
1187 uint64_t mask, imm;
1188 int nb_imms;
1189 int is64;
1190
1191 nb_imms = 0;
1192 for (log_e = 1; log_e <= 6; log_e++)
1193 {
1194 /* Get element size. */
1195 e = 1u << log_e;
1196 if (log_e == 6)
1197 {
1198 is64 = 1;
1199 mask = 0xffffffffffffffffull;
1200 s_mask = 0;
1201 }
1202 else
1203 {
1204 is64 = 0;
1205 mask = (1ull << e) - 1;
1206 /* log_e s_mask
1207 1 ((1 << 4) - 1) << 2 = 111100
1208 2 ((1 << 3) - 1) << 3 = 111000
1209 3 ((1 << 2) - 1) << 4 = 110000
1210 4 ((1 << 1) - 1) << 5 = 100000
1211 5 ((1 << 0) - 1) << 6 = 000000 */
1212 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1213 }
1214 for (s = 0; s < e - 1; s++)
1215 for (r = 0; r < e; r++)
1216 {
1217 /* s+1 consecutive bits to 1 (s < 63) */
1218 imm = (1ull << (s + 1)) - 1;
1219 /* rotate right by r */
1220 if (r != 0)
1221 imm = (imm >> r) | ((imm << (e - r)) & mask);
1222 /* replicate the constant depending on SIMD size */
1223 switch (log_e)
1224 {
1225 case 1: imm = (imm << 2) | imm;
1226 /* Fall through. */
1227 case 2: imm = (imm << 4) | imm;
1228 /* Fall through. */
1229 case 3: imm = (imm << 8) | imm;
1230 /* Fall through. */
1231 case 4: imm = (imm << 16) | imm;
1232 /* Fall through. */
1233 case 5: imm = (imm << 32) | imm;
1234 /* Fall through. */
1235 case 6: break;
1236 default: abort ();
1237 }
1238 simd_immediates[nb_imms].imm = imm;
1239 simd_immediates[nb_imms].encoding =
1240 encode_immediate_bitfield(is64, s | s_mask, r);
1241 nb_imms++;
1242 }
1243 }
1244 assert (nb_imms == TOTAL_IMM_NB);
1245 qsort(simd_immediates, nb_imms,
1246 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1247 }
1248
1249 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1250 be accepted by logical (immediate) instructions
1251 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1252
1253 ESIZE is the number of bytes in the decoded immediate value.
1254 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1255 VALUE will be returned in *ENCODING. */
1256
1257 bool
1258 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1259 {
1260 simd_imm_encoding imm_enc;
1261 const simd_imm_encoding *imm_encoding;
1262 static bool initialized = false;
1263 uint64_t upper;
1264 int i;
1265
1266 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1267 value, esize);
1268
1269 if (!initialized)
1270 {
1271 build_immediate_table ();
1272 initialized = true;
1273 }
1274
1275 /* Allow all zeros or all ones in top bits, so that
1276 constant expressions like ~1 are permitted. */
1277 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1278 if ((value & ~upper) != value && (value | upper) != value)
1279 return false;
1280
1281 /* Replicate to a full 64-bit value. */
1282 value &= ~upper;
1283 for (i = esize * 8; i < 64; i *= 2)
1284 value |= (value << i);
1285
1286 imm_enc.imm = value;
1287 imm_encoding = (const simd_imm_encoding *)
1288 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1289 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1290 if (imm_encoding == NULL)
1291 {
1292 DEBUG_TRACE ("exit with false");
1293 return false;
1294 }
1295 if (encoding != NULL)
1296 *encoding = imm_encoding->encoding;
1297 DEBUG_TRACE ("exit with true");
1298 return true;
1299 }
1300
1301 /* If 64-bit immediate IMM is in the format of
1302 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1303 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1304 of value "abcdefgh". Otherwise return -1. */
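/* For example, 0xff00ff0000ff00ff yields 0xa5 (binary 10100101), while any
   input containing a byte other than 0x00 or 0xff yields -1.  */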
1305 int
1306 aarch64_shrink_expanded_imm8 (uint64_t imm)
1307 {
1308 int i, ret;
1309 uint32_t byte;
1310
1311 ret = 0;
1312 for (i = 0; i < 8; i++)
1313 {
1314 byte = (imm >> (8 * i)) & 0xff;
1315 if (byte == 0xff)
1316 ret |= 1 << i;
1317 else if (byte != 0x00)
1318 return -1;
1319 }
1320 return ret;
1321 }
1322
1323 /* Utility inline functions for operand_general_constraint_met_p. */
1324
1325 static inline void
1326 set_error (aarch64_operand_error *mismatch_detail,
1327 enum aarch64_operand_error_kind kind, int idx,
1328 const char* error)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 mismatch_detail->kind = kind;
1333 mismatch_detail->index = idx;
1334 mismatch_detail->error = error;
1335 }
1336
1337 static inline void
1338 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1339 const char* error)
1340 {
1341 if (mismatch_detail == NULL)
1342 return;
1343 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1344 }
1345
1346 static inline void
1347 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1348 int idx, int lower_bound, int upper_bound,
1349 const char* error)
1350 {
1351 if (mismatch_detail == NULL)
1352 return;
1353 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1354 mismatch_detail->data[0].i = lower_bound;
1355 mismatch_detail->data[1].i = upper_bound;
1356 }
1357
1358 static inline void
1359 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("immediate value"));
1366 }
1367
1368 static inline void
1369 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1370 int idx, int lower_bound, int upper_bound)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1375 _("immediate offset"));
1376 }
1377
1378 static inline void
1379 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1380 int idx, int lower_bound, int upper_bound)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1385 _("register number"));
1386 }
1387
1388 static inline void
1389 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1390 int idx, int lower_bound, int upper_bound)
1391 {
1392 if (mismatch_detail == NULL)
1393 return;
1394 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1395 _("register element index"));
1396 }
1397
1398 static inline void
1399 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1400 int idx, int lower_bound, int upper_bound)
1401 {
1402 if (mismatch_detail == NULL)
1403 return;
1404 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1405 _("shift amount"));
1406 }
1407
1408 /* Report that the MUL modifier in operand IDX should be in the range
1409 [LOWER_BOUND, UPPER_BOUND]. */
1410 static inline void
1411 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1412 int idx, int lower_bound, int upper_bound)
1413 {
1414 if (mismatch_detail == NULL)
1415 return;
1416 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1417 _("multiplier"));
1418 }
1419
1420 static inline void
1421 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1422 int alignment)
1423 {
1424 if (mismatch_detail == NULL)
1425 return;
1426 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1427 mismatch_detail->data[0].i = alignment;
1428 }
1429
1430 static inline void
1431 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1432 int expected_num)
1433 {
1434 if (mismatch_detail == NULL)
1435 return;
1436 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1437 mismatch_detail->data[0].i = expected_num;
1438 }
1439
1440 static inline void
1441 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1442 const char* error)
1443 {
1444 if (mismatch_detail == NULL)
1445 return;
1446 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1447 }
1448
1449 /* General constraint checking based on operand code.
1450
1451 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1452 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1453
1454 This function has to be called after the qualifiers for all operands
1455 have been resolved.
1456
1457 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1458 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1459 messages during disassembly, where they are not wanted. We avoid the
1460 dynamic construction of error message strings
1461 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1462 use a combination of error code, static string and some integer data to
1463 represent an error. */
1464
1465 static int
1466 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1467 enum aarch64_opnd type,
1468 const aarch64_opcode *opcode,
1469 aarch64_operand_error *mismatch_detail)
1470 {
1471 unsigned num, modifiers, shift;
1472 unsigned char size;
1473 int64_t imm, min_value, max_value;
1474 uint64_t uvalue, mask;
1475 const aarch64_opnd_info *opnd = opnds + idx;
1476 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1477 int i;
1478
1479 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1480
1481 switch (aarch64_operands[type].op_class)
1482 {
1483 case AARCH64_OPND_CLASS_INT_REG:
1484 /* Check pair reg constraints for cas* instructions. */
1485 if (type == AARCH64_OPND_PAIRREG)
1486 {
1487 assert (idx == 1 || idx == 3);
1488 if (opnds[idx - 1].reg.regno % 2 != 0)
1489 {
1490 set_syntax_error (mismatch_detail, idx - 1,
1491 _("reg pair must start from even reg"));
1492 return 0;
1493 }
1494 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1495 {
1496 set_syntax_error (mismatch_detail, idx,
1497 _("reg pair must be contiguous"));
1498 return 0;
1499 }
1500 break;
1501 }
1502
1503 /* <Xt> may be optional in some IC and TLBI instructions. */
1504 if (type == AARCH64_OPND_Rt_SYS)
1505 {
1506 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1507 == AARCH64_OPND_CLASS_SYSTEM));
1508 if (opnds[1].present
1509 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1510 {
1511 set_other_error (mismatch_detail, idx, _("extraneous register"));
1512 return 0;
1513 }
1514 if (!opnds[1].present
1515 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1516 {
1517 set_other_error (mismatch_detail, idx, _("missing register"));
1518 return 0;
1519 }
1520 }
1521 switch (qualifier)
1522 {
1523 case AARCH64_OPND_QLF_WSP:
1524 case AARCH64_OPND_QLF_SP:
1525 if (!aarch64_stack_pointer_p (opnd))
1526 {
1527 set_other_error (mismatch_detail, idx,
1528 _("stack pointer register expected"));
1529 return 0;
1530 }
1531 break;
1532 default:
1533 break;
1534 }
1535 break;
1536
1537 case AARCH64_OPND_CLASS_SVE_REG:
1538 switch (type)
1539 {
1540 case AARCH64_OPND_SVE_Zm3_INDEX:
1541 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1542 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1543 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1544 case AARCH64_OPND_SVE_Zm4_INDEX:
1545 size = get_operand_fields_width (get_operand_from_code (type));
1546 shift = get_operand_specific_data (&aarch64_operands[type]);
1547 mask = (1 << shift) - 1;
1548 if (opnd->reg.regno > mask)
1549 {
1550 assert (mask == 7 || mask == 15);
1551 set_other_error (mismatch_detail, idx,
1552 mask == 15
1553 ? _("z0-z15 expected")
1554 : _("z0-z7 expected"));
1555 return 0;
1556 }
1557 mask = (1u << (size - shift)) - 1;
1558 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1559 {
1560 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1561 return 0;
1562 }
1563 break;
1564
1565 case AARCH64_OPND_SVE_Zn_INDEX:
1566 size = aarch64_get_qualifier_esize (opnd->qualifier);
1567 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1568 {
1569 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1570 0, 64 / size - 1);
1571 return 0;
1572 }
1573 break;
1574
1575 case AARCH64_OPND_SVE_ZnxN:
1576 case AARCH64_OPND_SVE_ZtxN:
1577 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1578 {
1579 set_other_error (mismatch_detail, idx,
1580 _("invalid register list"));
1581 return 0;
1582 }
1583 break;
1584
1585 default:
1586 break;
1587 }
1588 break;
1589
1590 case AARCH64_OPND_CLASS_PRED_REG:
1591 if (opnd->reg.regno >= 8
1592 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1593 {
1594 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1595 return 0;
1596 }
1597 break;
1598
1599 case AARCH64_OPND_CLASS_COND:
1600 if (type == AARCH64_OPND_COND1
1601 && (opnds[idx].cond->value & 0xe) == 0xe)
1602 {
1603 /* Don't allow AL or NV. */
1604 set_syntax_error (mismatch_detail, idx, NULL);
1605 }
1606 break;
1607
1608 case AARCH64_OPND_CLASS_ADDRESS:
1609 /* Check writeback. */
1610 switch (opcode->iclass)
1611 {
1612 case ldst_pos:
1613 case ldst_unscaled:
1614 case ldstnapair_offs:
1615 case ldstpair_off:
1616 case ldst_unpriv:
1617 if (opnd->addr.writeback == 1)
1618 {
1619 set_syntax_error (mismatch_detail, idx,
1620 _("unexpected address writeback"));
1621 return 0;
1622 }
1623 break;
1624 case ldst_imm10:
1625 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1626 {
1627 set_syntax_error (mismatch_detail, idx,
1628 _("unexpected address writeback"));
1629 return 0;
1630 }
1631 break;
1632 case ldst_imm9:
1633 case ldstpair_indexed:
1634 case asisdlsep:
1635 case asisdlsop:
1636 if (opnd->addr.writeback == 0)
1637 {
1638 set_syntax_error (mismatch_detail, idx,
1639 _("address writeback expected"));
1640 return 0;
1641 }
1642 break;
1643 default:
1644 assert (opnd->addr.writeback == 0);
1645 break;
1646 }
1647 switch (type)
1648 {
1649 case AARCH64_OPND_ADDR_SIMM7:
1650 /* Scaled signed 7 bits immediate offset. */
1651 /* Get the size of the data element that is accessed, which may be
1652 different from that of the source register,
1653 e.g. in strb/ldrb. */
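/* For example, with X-register operands (SIZE == 8) the offset must be
   a multiple of 8 in the range [-512, 504].  */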
1654 size = aarch64_get_qualifier_esize (opnd->qualifier);
1655 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1656 {
1657 set_offset_out_of_range_error (mismatch_detail, idx,
1658 -64 * size, 63 * size);
1659 return 0;
1660 }
1661 if (!value_aligned_p (opnd->addr.offset.imm, size))
1662 {
1663 set_unaligned_error (mismatch_detail, idx, size);
1664 return 0;
1665 }
1666 break;
1667 case AARCH64_OPND_ADDR_OFFSET:
1668 case AARCH64_OPND_ADDR_SIMM9:
1669 /* Unscaled signed 9 bits immediate offset. */
1670 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1671 {
1672 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1673 return 0;
1674 }
1675 break;
1676
1677 case AARCH64_OPND_ADDR_SIMM9_2:
1678 /* Unscaled signed 9 bits immediate offset, which has to be negative
1679 or unaligned. */
1680 size = aarch64_get_qualifier_esize (qualifier);
1681 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1682 && !value_aligned_p (opnd->addr.offset.imm, size))
1683 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1684 return 1;
1685 set_other_error (mismatch_detail, idx,
1686 _("negative or unaligned offset expected"));
1687 return 0;
1688
1689 case AARCH64_OPND_ADDR_SIMM10:
1690 /* Scaled signed 10-bit immediate offset. */
1691 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1692 {
1693 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1694 return 0;
1695 }
1696 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1697 {
1698 set_unaligned_error (mismatch_detail, idx, 8);
1699 return 0;
1700 }
1701 break;
1702
1703 case AARCH64_OPND_ADDR_SIMM11:
1704 /* Signed 11-bit immediate offset (multiple of 16). */
1705 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1706 {
1707 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1708 return 0;
1709 }
1710
1711 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1712 {
1713 set_unaligned_error (mismatch_detail, idx, 16);
1714 return 0;
1715 }
1716 break;
1717
1718 case AARCH64_OPND_ADDR_SIMM13:
1719 /* Signed 13-bit immediate offset (multiple of 16). */
1720 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1721 {
1722 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1723 return 0;
1724 }
1725
1726 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1727 {
1728 set_unaligned_error (mismatch_detail, idx, 16);
1729 return 0;
1730 }
1731 break;
1732
1733 case AARCH64_OPND_SIMD_ADDR_POST:
1734 /* AdvSIMD load/store multiple structures, post-index. */
1735 assert (idx == 1);
1736 if (opnd->addr.offset.is_reg)
1737 {
1738 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1739 return 1;
1740 else
1741 {
1742 set_other_error (mismatch_detail, idx,
1743 _("invalid register offset"));
1744 return 0;
1745 }
1746 }
1747 else
1748 {
1749 const aarch64_opnd_info *prev = &opnds[idx-1];
1750 unsigned num_bytes; /* total number of bytes transferred. */
1751 /* The opcode dependent area stores the number of elements in
1752 each structure to be loaded/stored. */
1753 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1754 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1755 /* Special handling of loading a single structure to all lanes. */
1756 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1757 * aarch64_get_qualifier_esize (prev->qualifier);
1758 else
1759 num_bytes = prev->reglist.num_regs
1760 * aarch64_get_qualifier_esize (prev->qualifier)
1761 * aarch64_get_qualifier_nelem (prev->qualifier);
1762 if ((int) num_bytes != opnd->addr.offset.imm)
1763 {
1764 set_other_error (mismatch_detail, idx,
1765 _("invalid post-increment amount"));
1766 return 0;
1767 }
1768 }
1769 break;
1770
1771 case AARCH64_OPND_ADDR_REGOFF:
1772 /* Get the size of the data element that is accessed, which may be
1773 different from the source register size,
1774 e.g. in strb/ldrb. */
1775 size = aarch64_get_qualifier_esize (opnd->qualifier);
1776 /* It is either no shift or shift by the binary logarithm of SIZE. */
1777 if (opnd->shifter.amount != 0
1778 && opnd->shifter.amount != (int)get_logsz (size))
1779 {
1780 set_other_error (mismatch_detail, idx,
1781 _("invalid shift amount"));
1782 return 0;
1783 }
1784 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1785 operators. */
1786 switch (opnd->shifter.kind)
1787 {
1788 case AARCH64_MOD_UXTW:
1789 case AARCH64_MOD_LSL:
1790 case AARCH64_MOD_SXTW:
1791 case AARCH64_MOD_SXTX: break;
1792 default:
1793 set_other_error (mismatch_detail, idx,
1794 _("invalid extend/shift operator"));
1795 return 0;
1796 }
1797 break;
1798
1799 case AARCH64_OPND_ADDR_UIMM12:
1800 imm = opnd->addr.offset.imm;
1801 /* Get the size of the data element that is accessed, which may be
1802 different from the source register size,
1803 e.g. in strb/ldrb. */
1804 size = aarch64_get_qualifier_esize (qualifier);
1805 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1806 {
1807 set_offset_out_of_range_error (mismatch_detail, idx,
1808 0, 4095 * size);
1809 return 0;
1810 }
1811 if (!value_aligned_p (opnd->addr.offset.imm, size))
1812 {
1813 set_unaligned_error (mismatch_detail, idx, size);
1814 return 0;
1815 }
1816 break;
1817
1818 case AARCH64_OPND_ADDR_PCREL14:
1819 case AARCH64_OPND_ADDR_PCREL19:
1820 case AARCH64_OPND_ADDR_PCREL21:
1821 case AARCH64_OPND_ADDR_PCREL26:
1822 imm = opnd->imm.value;
1823 if (operand_need_shift_by_two (get_operand_from_code (type)))
1824 {
1825 /* The offset value in a PC-relative branch instruction is always
1826 4-byte aligned and is encoded without the lowest 2 bits. */
1827 if (!value_aligned_p (imm, 4))
1828 {
1829 set_unaligned_error (mismatch_detail, idx, 4);
1830 return 0;
1831 }
1832 /* Right shift by 2 so that we can carry out the following check
1833 canonically. */
1834 imm >>= 2;
1835 }
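/* SIZE below is the width of the immediate field; for example,
   AARCH64_OPND_ADDR_PCREL26 (used by B and BL) has a 26-bit field,
   giving a branch range of roughly +/-128MiB once the two implicit
   low bits are taken into account.  */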
1836 size = get_operand_fields_width (get_operand_from_code (type));
1837 if (!value_fit_signed_field_p (imm, size))
1838 {
1839 set_other_error (mismatch_detail, idx,
1840 _("immediate out of range"));
1841 return 0;
1842 }
1843 break;
1844
1845 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1846 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1847 {
1848 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1849 return 0;
1850 }
1851 break;
1852
1853 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1854 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1855 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1856 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1857 min_value = -8;
1858 max_value = 7;
1859 sve_imm_offset_vl:
1860 assert (!opnd->addr.offset.is_reg);
1861 assert (opnd->addr.preind);
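/* NUM is the multiple of the vector length implied by the operand type
   (2 for AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, for example); the offset must
   be a multiple of NUM and lie within NUM times the base range.  */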
1862 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1863 min_value *= num;
1864 max_value *= num;
1865 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1866 || (opnd->shifter.operator_present
1867 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1868 {
1869 set_other_error (mismatch_detail, idx,
1870 _("invalid addressing mode"));
1871 return 0;
1872 }
1873 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1874 {
1875 set_offset_out_of_range_error (mismatch_detail, idx,
1876 min_value, max_value);
1877 return 0;
1878 }
1879 if (!value_aligned_p (opnd->addr.offset.imm, num))
1880 {
1881 set_unaligned_error (mismatch_detail, idx, num);
1882 return 0;
1883 }
1884 break;
1885
1886 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1887 min_value = -32;
1888 max_value = 31;
1889 goto sve_imm_offset_vl;
1890
1891 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1892 min_value = -256;
1893 max_value = 255;
1894 goto sve_imm_offset_vl;
1895
1896 case AARCH64_OPND_SVE_ADDR_RI_U6:
1897 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1898 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1899 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1900 min_value = 0;
1901 max_value = 63;
1902 sve_imm_offset:
1903 assert (!opnd->addr.offset.is_reg);
1904 assert (opnd->addr.preind);
1905 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1906 min_value *= num;
1907 max_value *= num;
1908 if (opnd->shifter.operator_present
1909 || opnd->shifter.amount_present)
1910 {
1911 set_other_error (mismatch_detail, idx,
1912 _("invalid addressing mode"));
1913 return 0;
1914 }
1915 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1916 {
1917 set_offset_out_of_range_error (mismatch_detail, idx,
1918 min_value, max_value);
1919 return 0;
1920 }
1921 if (!value_aligned_p (opnd->addr.offset.imm, num))
1922 {
1923 set_unaligned_error (mismatch_detail, idx, num);
1924 return 0;
1925 }
1926 break;
1927
1928 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1929 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1930 min_value = -8;
1931 max_value = 7;
1932 goto sve_imm_offset;
1933
1934 case AARCH64_OPND_SVE_ADDR_ZX:
1935 /* Everything is already ensured by parse_operands or
1936 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1937 argument type). */
1938 assert (opnd->addr.offset.is_reg);
1939 assert (opnd->addr.preind);
1940 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1941 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1942 assert (opnd->shifter.operator_present == 0);
1943 break;
1944
1945 case AARCH64_OPND_SVE_ADDR_R:
1946 case AARCH64_OPND_SVE_ADDR_RR:
1947 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1948 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1949 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1950 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1951 case AARCH64_OPND_SVE_ADDR_RX:
1952 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1953 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1954 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1955 case AARCH64_OPND_SVE_ADDR_RZ:
1956 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1957 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1958 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1959 modifiers = 1 << AARCH64_MOD_LSL;
1960 sve_rr_operand:
1961 assert (opnd->addr.offset.is_reg);
1962 assert (opnd->addr.preind);
1963 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1964 && opnd->addr.offset.regno == 31)
1965 {
1966 set_other_error (mismatch_detail, idx,
1967 _("index register xzr is not allowed"));
1968 return 0;
1969 }
1970 if (((1 << opnd->shifter.kind) & modifiers) == 0
1971 || (opnd->shifter.amount
1972 != get_operand_specific_data (&aarch64_operands[type])))
1973 {
1974 set_other_error (mismatch_detail, idx,
1975 _("invalid addressing mode"));
1976 return 0;
1977 }
1978 break;
1979
1980 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1981 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1982 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1983 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1984 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1985 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1986 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1987 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1988 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1989 goto sve_rr_operand;
1990
1991 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1992 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1993 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1994 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1995 min_value = 0;
1996 max_value = 31;
1997 goto sve_imm_offset;
1998
1999 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2000 modifiers = 1 << AARCH64_MOD_LSL;
2001 sve_zz_operand:
2002 assert (opnd->addr.offset.is_reg);
2003 assert (opnd->addr.preind);
2004 if (((1 << opnd->shifter.kind) & modifiers) == 0
2005 || opnd->shifter.amount < 0
2006 || opnd->shifter.amount > 3)
2007 {
2008 set_other_error (mismatch_detail, idx,
2009 _("invalid addressing mode"));
2010 return 0;
2011 }
2012 break;
2013
2014 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2015 modifiers = (1 << AARCH64_MOD_SXTW);
2016 goto sve_zz_operand;
2017
2018 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2019 modifiers = 1 << AARCH64_MOD_UXTW;
2020 goto sve_zz_operand;
2021
2022 default:
2023 break;
2024 }
2025 break;
2026
2027 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2028 if (type == AARCH64_OPND_LEt)
2029 {
2030 /* Get the upper bound for the element index. */
2031 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2032 if (!value_in_range_p (opnd->reglist.index, 0, num))
2033 {
2034 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2035 return 0;
2036 }
2037 }
2038 /* The opcode dependent area stores the number of elements in
2039 each structure to be loaded/stored. */
2040 num = get_opcode_dependent_value (opcode);
2041 switch (type)
2042 {
2043 case AARCH64_OPND_LVt:
2044 assert (num >= 1 && num <= 4);
2045 /* Except for LD1/ST1, the number of registers should be equal to that
2046 of the structure elements. */
2047 if (num != 1 && opnd->reglist.num_regs != num)
2048 {
2049 set_reg_list_error (mismatch_detail, idx, num);
2050 return 0;
2051 }
2052 break;
2053 case AARCH64_OPND_LVt_AL:
2054 case AARCH64_OPND_LEt:
2055 assert (num >= 1 && num <= 4);
2056 /* The number of registers should be equal to that of the structure
2057 elements. */
2058 if (opnd->reglist.num_regs != num)
2059 {
2060 set_reg_list_error (mismatch_detail, idx, num);
2061 return 0;
2062 }
2063 break;
2064 default:
2065 break;
2066 }
2067 break;
2068
2069 case AARCH64_OPND_CLASS_IMMEDIATE:
2070 /* Constraint check on immediate operand. */
2071 imm = opnd->imm.value;
2072 /* E.g. imm_0_31 constrains value to be 0..31. */
2073 if (qualifier_value_in_range_constraint_p (qualifier)
2074 && !value_in_range_p (imm, get_lower_bound (qualifier),
2075 get_upper_bound (qualifier)))
2076 {
2077 set_imm_out_of_range_error (mismatch_detail, idx,
2078 get_lower_bound (qualifier),
2079 get_upper_bound (qualifier));
2080 return 0;
2081 }
2082
2083 switch (type)
2084 {
2085 case AARCH64_OPND_AIMM:
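/* AIMM is the 12-bit arithmetic immediate used by ADD/SUB (immediate),
   optionally shifted left by 12, as the checks below enforce.  */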
2086 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2087 {
2088 set_other_error (mismatch_detail, idx,
2089 _("invalid shift operator"));
2090 return 0;
2091 }
2092 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2093 {
2094 set_other_error (mismatch_detail, idx,
2095 _("shift amount must be 0 or 12"));
2096 return 0;
2097 }
2098 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2099 {
2100 set_other_error (mismatch_detail, idx,
2101 _("immediate out of range"));
2102 return 0;
2103 }
2104 break;
2105
2106 case AARCH64_OPND_HALF:
2107 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
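/* HALF is the 16-bit immediate of the wide move instructions
   (MOVZ/MOVN/MOVK); the shift must be a multiple of 16 and, for example,
   can only be 0, 16, 32 or 48 for a 64-bit destination.  */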
2108 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2109 {
2110 set_other_error (mismatch_detail, idx,
2111 _("invalid shift operator"));
2112 return 0;
2113 }
2114 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2115 if (!value_aligned_p (opnd->shifter.amount, 16))
2116 {
2117 set_other_error (mismatch_detail, idx,
2118 _("shift amount must be a multiple of 16"));
2119 return 0;
2120 }
2121 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2122 {
2123 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2124 0, size * 8 - 16);
2125 return 0;
2126 }
2127 if (opnd->imm.value < 0)
2128 {
2129 set_other_error (mismatch_detail, idx,
2130 _("negative immediate value not allowed"));
2131 return 0;
2132 }
2133 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2134 {
2135 set_other_error (mismatch_detail, idx,
2136 _("immediate out of range"));
2137 return 0;
2138 }
2139 break;
2140
2141 case AARCH64_OPND_IMM_MOV:
2142 {
2143 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2144 imm = opnd->imm.value;
2145 assert (idx == 1);
2146 switch (opcode->op)
2147 {
2148 case OP_MOV_IMM_WIDEN:
2149 imm = ~imm;
2150 /* Fall through. */
2151 case OP_MOV_IMM_WIDE:
2152 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2153 {
2154 set_other_error (mismatch_detail, idx,
2155 _("immediate out of range"));
2156 return 0;
2157 }
2158 break;
2159 case OP_MOV_IMM_LOG:
2160 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2161 {
2162 set_other_error (mismatch_detail, idx,
2163 _("immediate out of range"));
2164 return 0;
2165 }
2166 break;
2167 default:
2168 assert (0);
2169 return 0;
2170 }
2171 }
2172 break;
2173
2174 case AARCH64_OPND_NZCV:
2175 case AARCH64_OPND_CCMP_IMM:
2176 case AARCH64_OPND_EXCEPTION:
2177 case AARCH64_OPND_UNDEFINED:
2178 case AARCH64_OPND_TME_UIMM16:
2179 case AARCH64_OPND_UIMM4:
2180 case AARCH64_OPND_UIMM4_ADDG:
2181 case AARCH64_OPND_UIMM7:
2182 case AARCH64_OPND_UIMM3_OP1:
2183 case AARCH64_OPND_UIMM3_OP2:
2184 case AARCH64_OPND_SVE_UIMM3:
2185 case AARCH64_OPND_SVE_UIMM7:
2186 case AARCH64_OPND_SVE_UIMM8:
2187 case AARCH64_OPND_SVE_UIMM8_53:
2188 size = get_operand_fields_width (get_operand_from_code (type));
2189 assert (size < 32);
2190 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2191 {
2192 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2193 (1u << size) - 1);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_UIMM10:
2199 /* Scaled unsigned 10-bit immediate offset. */
2200 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2201 {
2202 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2203 return 0;
2204 }
2205
2206 if (!value_aligned_p (opnd->imm.value, 16))
2207 {
2208 set_unaligned_error (mismatch_detail, idx, 16);
2209 return 0;
2210 }
2211 break;
2212
2213 case AARCH64_OPND_SIMM5:
2214 case AARCH64_OPND_SVE_SIMM5:
2215 case AARCH64_OPND_SVE_SIMM5B:
2216 case AARCH64_OPND_SVE_SIMM6:
2217 case AARCH64_OPND_SVE_SIMM8:
2218 size = get_operand_fields_width (get_operand_from_code (type));
2219 assert (size < 32);
2220 if (!value_fit_signed_field_p (opnd->imm.value, size))
2221 {
2222 set_imm_out_of_range_error (mismatch_detail, idx,
2223 -(1 << (size - 1)),
2224 (1 << (size - 1)) - 1);
2225 return 0;
2226 }
2227 break;
2228
2229 case AARCH64_OPND_WIDTH:
2230 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2231 && opnds[0].type == AARCH64_OPND_Rd);
2232 size = get_upper_bound (qualifier);
2233 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2234 /* lsb+width <= reg.size */
2235 {
2236 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2237 size - opnds[idx-1].imm.value);
2238 return 0;
2239 }
2240 break;
2241
2242 case AARCH64_OPND_LIMM:
2243 case AARCH64_OPND_SVE_LIMM:
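/* A logical immediate is a bitmask consisting of a single (rotated) run
   of ones replicated across 2-, 4-, 8-, 16-, 32- or 64-bit elements;
   e.g. 0x00ff00ff00ff00ff is valid for ESIZE == 8, but 0x12345678 is not.  */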
2244 {
2245 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2246 uint64_t uimm = opnd->imm.value;
2247 if (opcode->op == OP_BIC)
2248 uimm = ~uimm;
2249 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2250 {
2251 set_other_error (mismatch_detail, idx,
2252 _("immediate out of range"));
2253 return 0;
2254 }
2255 }
2256 break;
2257
2258 case AARCH64_OPND_IMM0:
2259 case AARCH64_OPND_FPIMM0:
2260 if (opnd->imm.value != 0)
2261 {
2262 set_other_error (mismatch_detail, idx,
2263 _("immediate zero expected"));
2264 return 0;
2265 }
2266 break;
2267
2268 case AARCH64_OPND_IMM_ROT1:
2269 case AARCH64_OPND_IMM_ROT2:
2270 case AARCH64_OPND_SVE_IMM_ROT2:
2271 if (opnd->imm.value != 0
2272 && opnd->imm.value != 90
2273 && opnd->imm.value != 180
2274 && opnd->imm.value != 270)
2275 {
2276 set_other_error (mismatch_detail, idx,
2277 _("rotate expected to be 0, 90, 180 or 270"));
2278 return 0;
2279 }
2280 break;
2281
2282 case AARCH64_OPND_IMM_ROT3:
2283 case AARCH64_OPND_SVE_IMM_ROT1:
2284 case AARCH64_OPND_SVE_IMM_ROT3:
2285 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2286 {
2287 set_other_error (mismatch_detail, idx,
2288 _("rotate expected to be 90 or 270"));
2289 return 0;
2290 }
2291 break;
2292
2293 case AARCH64_OPND_SHLL_IMM:
2294 assert (idx == 2);
2295 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2296 if (opnd->imm.value != size)
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("invalid shift amount"));
2300 return 0;
2301 }
2302 break;
2303
2304 case AARCH64_OPND_IMM_VLSL:
2305 size = aarch64_get_qualifier_esize (qualifier);
2306 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2307 {
2308 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2309 size * 8 - 1);
2310 return 0;
2311 }
2312 break;
2313
2314 case AARCH64_OPND_IMM_VLSR:
2315 size = aarch64_get_qualifier_esize (qualifier);
2316 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2317 {
2318 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2319 return 0;
2320 }
2321 break;
2322
2323 case AARCH64_OPND_SIMD_IMM:
2324 case AARCH64_OPND_SIMD_IMM_SFT:
2325 /* Qualifier check. */
2326 switch (qualifier)
2327 {
2328 case AARCH64_OPND_QLF_LSL:
2329 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2330 {
2331 set_other_error (mismatch_detail, idx,
2332 _("invalid shift operator"));
2333 return 0;
2334 }
2335 break;
2336 case AARCH64_OPND_QLF_MSL:
2337 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2338 {
2339 set_other_error (mismatch_detail, idx,
2340 _("invalid shift operator"));
2341 return 0;
2342 }
2343 break;
2344 case AARCH64_OPND_QLF_NIL:
2345 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2346 {
2347 set_other_error (mismatch_detail, idx,
2348 _("shift is not permitted"));
2349 return 0;
2350 }
2351 break;
2352 default:
2353 assert (0);
2354 return 0;
2355 }
2356 /* Is the immediate valid? */
2357 assert (idx == 1);
2358 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2359 {
2360 /* uimm8 or simm8 */
2361 if (!value_in_range_p (opnd->imm.value, -128, 255))
2362 {
2363 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2364 return 0;
2365 }
2366 }
2367 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2368 {
2369 /* uimm64 is not
2370 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2371 ffffffffgggggggghhhhhhhh'. */
2372 set_other_error (mismatch_detail, idx,
2373 _("invalid value for immediate"));
2374 return 0;
2375 }
2376 /* Is the shift amount valid? */
2377 switch (opnd->shifter.kind)
2378 {
2379 case AARCH64_MOD_LSL:
2380 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2381 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2382 {
2383 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2384 (size - 1) * 8);
2385 return 0;
2386 }
2387 if (!value_aligned_p (opnd->shifter.amount, 8))
2388 {
2389 set_unaligned_error (mismatch_detail, idx, 8);
2390 return 0;
2391 }
2392 break;
2393 case AARCH64_MOD_MSL:
2394 /* Only 8 and 16 are valid shift amounts. */
2395 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2396 {
2397 set_other_error (mismatch_detail, idx,
2398 _("shift amount must be 0 or 16"));
2399 return 0;
2400 }
2401 break;
2402 default:
2403 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2404 {
2405 set_other_error (mismatch_detail, idx,
2406 _("invalid shift operator"));
2407 return 0;
2408 }
2409 break;
2410 }
2411 break;
2412
2413 case AARCH64_OPND_FPIMM:
2414 case AARCH64_OPND_SIMD_FPIMM:
2415 case AARCH64_OPND_SVE_FPIMM8:
2416 if (opnd->imm.is_fp == 0)
2417 {
2418 set_other_error (mismatch_detail, idx,
2419 _("floating-point immediate expected"));
2420 return 0;
2421 }
2422 /* The value is expected to be an 8-bit floating-point constant with
2423 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2424 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2425 instruction). */
2426 if (!value_in_range_p (opnd->imm.value, 0, 255))
2427 {
2428 set_other_error (mismatch_detail, idx,
2429 _("immediate out of range"));
2430 return 0;
2431 }
2432 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2433 {
2434 set_other_error (mismatch_detail, idx,
2435 _("invalid shift operator"));
2436 return 0;
2437 }
2438 break;
2439
2440 case AARCH64_OPND_SVE_AIMM:
2441 min_value = 0;
2442 sve_aimm:
2443 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2444 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
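/* Form a mask of the low SIZE * 8 bits.  The shift is split into two
   steps so that SIZE == 8 does not shift a 64-bit value by 64 bits,
   which would be undefined behaviour.  */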
2445 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2446 uvalue = opnd->imm.value;
2447 shift = opnd->shifter.amount;
2448 if (size == 1)
2449 {
2450 if (shift != 0)
2451 {
2452 set_other_error (mismatch_detail, idx,
2453 _("no shift amount allowed for"
2454 " 8-bit constants"));
2455 return 0;
2456 }
2457 }
2458 else
2459 {
2460 if (shift != 0 && shift != 8)
2461 {
2462 set_other_error (mismatch_detail, idx,
2463 _("shift amount must be 0 or 8"));
2464 return 0;
2465 }
2466 if (shift == 0 && (uvalue & 0xff) == 0)
2467 {
2468 shift = 8;
2469 uvalue = (int64_t) uvalue / 256;
2470 }
2471 }
2472 mask >>= shift;
2473 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2474 {
2475 set_other_error (mismatch_detail, idx,
2476 _("immediate too big for element size"));
2477 return 0;
2478 }
2479 uvalue = (uvalue - min_value) & mask;
2480 if (uvalue > 0xff)
2481 {
2482 set_other_error (mismatch_detail, idx,
2483 _("invalid arithmetic immediate"));
2484 return 0;
2485 }
2486 break;
2487
2488 case AARCH64_OPND_SVE_ASIMM:
2489 min_value = -128;
2490 goto sve_aimm;
2491
2492 case AARCH64_OPND_SVE_I1_HALF_ONE:
2493 assert (opnd->imm.is_fp);
2494 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2495 {
2496 set_other_error (mismatch_detail, idx,
2497 _("floating-point value must be 0.5 or 1.0"));
2498 return 0;
2499 }
2500 break;
2501
2502 case AARCH64_OPND_SVE_I1_HALF_TWO:
2503 assert (opnd->imm.is_fp);
2504 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2505 {
2506 set_other_error (mismatch_detail, idx,
2507 _("floating-point value must be 0.5 or 2.0"));
2508 return 0;
2509 }
2510 break;
2511
2512 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2513 assert (opnd->imm.is_fp);
2514 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2515 {
2516 set_other_error (mismatch_detail, idx,
2517 _("floating-point value must be 0.0 or 1.0"));
2518 return 0;
2519 }
2520 break;
2521
2522 case AARCH64_OPND_SVE_INV_LIMM:
2523 {
2524 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2525 uint64_t uimm = ~opnd->imm.value;
2526 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2527 {
2528 set_other_error (mismatch_detail, idx,
2529 _("immediate out of range"));
2530 return 0;
2531 }
2532 }
2533 break;
2534
2535 case AARCH64_OPND_SVE_LIMM_MOV:
2536 {
2537 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2538 uint64_t uimm = opnd->imm.value;
2539 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2540 {
2541 set_other_error (mismatch_detail, idx,
2542 _("immediate out of range"));
2543 return 0;
2544 }
2545 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2546 {
2547 set_other_error (mismatch_detail, idx,
2548 _("invalid replicated MOV immediate"));
2549 return 0;
2550 }
2551 }
2552 break;
2553
2554 case AARCH64_OPND_SVE_PATTERN_SCALED:
2555 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2556 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2557 {
2558 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2559 return 0;
2560 }
2561 break;
2562
2563 case AARCH64_OPND_SVE_SHLIMM_PRED:
2564 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2565 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2566 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2567 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2568 {
2569 set_imm_out_of_range_error (mismatch_detail, idx,
2570 0, 8 * size - 1);
2571 return 0;
2572 }
2573 break;
2574
2575 case AARCH64_OPND_SVE_SHRIMM_PRED:
2576 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2577 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2578 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2579 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2580 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2581 {
2582 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2583 return 0;
2584 }
2585 break;
2586
2587 default:
2588 break;
2589 }
2590 break;
2591
2592 case AARCH64_OPND_CLASS_SYSTEM:
2593 switch (type)
2594 {
2595 case AARCH64_OPND_PSTATEFIELD:
2596 for (i = 0; aarch64_pstatefields[i].name; ++i)
2597 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2598 break;
2599 assert (aarch64_pstatefields[i].name);
2600 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2601 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2602 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2603 {
2604 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2605 return 0;
2606 }
2607 break;
2608 default:
2609 break;
2610 }
2611 break;
2612
2613 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2614 /* Get the upper bound for the element index. */
2615 if (opcode->op == OP_FCMLA_ELEM)
2616 /* FCMLA index range depends on the vector size of other operands
2617 and is halved because complex numbers take two elements. */
2618 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2619 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2620 else
2621 num = 16;
2622 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2623 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2624
2625 /* Index out-of-range. */
2626 if (!value_in_range_p (opnd->reglane.index, 0, num))
2627 {
2628 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2629 return 0;
2630 }
2631 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2632 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2633 number is encoded in "size:M:Rm":
2634 size <Vm>
2635 00 RESERVED
2636 01 0:Rm
2637 10 M:Rm
2638 11 RESERVED */
2639 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2640 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2641 {
2642 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2643 return 0;
2644 }
2645 break;
2646
2647 case AARCH64_OPND_CLASS_MODIFIED_REG:
2648 assert (idx == 1 || idx == 2);
2649 switch (type)
2650 {
2651 case AARCH64_OPND_Rm_EXT:
2652 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2653 && opnd->shifter.kind != AARCH64_MOD_LSL)
2654 {
2655 set_other_error (mismatch_detail, idx,
2656 _("extend operator expected"));
2657 return 0;
2658 }
2659 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2660 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2661 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2662 case. */
2663 if (!aarch64_stack_pointer_p (opnds + 0)
2664 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2665 {
2666 if (!opnd->shifter.operator_present)
2667 {
2668 set_other_error (mismatch_detail, idx,
2669 _("missing extend operator"));
2670 return 0;
2671 }
2672 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2673 {
2674 set_other_error (mismatch_detail, idx,
2675 _("'LSL' operator not allowed"));
2676 return 0;
2677 }
2678 }
2679 assert (opnd->shifter.operator_present /* Default to LSL. */
2680 || opnd->shifter.kind == AARCH64_MOD_LSL);
2681 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2682 {
2683 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2684 return 0;
2685 }
2686 /* In the 64-bit form, the final register operand is written as Wm
2687 for all but the (possibly omitted) UXTX/LSL and SXTX
2688 operators.
2689 N.B. GAS allows X register to be used with any operator as a
2690 programming convenience. */
2691 if (qualifier == AARCH64_OPND_QLF_X
2692 && opnd->shifter.kind != AARCH64_MOD_LSL
2693 && opnd->shifter.kind != AARCH64_MOD_UXTX
2694 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2695 {
2696 set_other_error (mismatch_detail, idx, _("W register expected"));
2697 return 0;
2698 }
2699 break;
2700
2701 case AARCH64_OPND_Rm_SFT:
2702 /* ROR is not available to the shifted register operand in
2703 arithmetic instructions. */
2704 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2705 {
2706 set_other_error (mismatch_detail, idx,
2707 _("shift operator expected"));
2708 return 0;
2709 }
2710 if (opnd->shifter.kind == AARCH64_MOD_ROR
2711 && opcode->iclass != log_shift)
2712 {
2713 set_other_error (mismatch_detail, idx,
2714 _("'ROR' operator not allowed"));
2715 return 0;
2716 }
2717 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2718 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2719 {
2720 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2721 return 0;
2722 }
2723 break;
2724
2725 default:
2726 break;
2727 }
2728 break;
2729
2730 default:
2731 break;
2732 }
2733
2734 return 1;
2735 }
2736
2737 /* Main entrypoint for the operand constraint checking.
2738
2739 Return 1 if operands of *INST meet the constraint applied by the operand
2740 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2741 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2742 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2743 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2744 error kind when it is notified that an instruction does not pass the check).
2745
2746 Un-determined operand qualifiers may get established during the process. */
2747
2748 int
2749 aarch64_match_operands_constraint (aarch64_inst *inst,
2750 aarch64_operand_error *mismatch_detail)
2751 {
2752 int i;
2753
2754 DEBUG_TRACE ("enter");
2755
2756 i = inst->opcode->tied_operand;
2757
2758 if (i > 0)
2759 {
2760 /* Check for tied_operands with specific opcode iclass. */
2761 switch (inst->opcode->iclass)
2762 {
2763 /* For SME LDR and STR instructions #imm must have the same numerical
2764 value for both operands.
2765 */
2766 case sme_ldr:
2767 case sme_str:
2768 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2769 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2770 if (inst->operands[0].za_tile_vector.index.imm
2771 != inst->operands[1].addr.offset.imm)
2772 {
2773 if (mismatch_detail)
2774 {
2775 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2776 mismatch_detail->index = i;
2777 }
2778 return 0;
2779 }
2780 break;
2781
2782 default:
2783 /* Check for cases where a source register needs to be the same as the
2784 destination register. Do this before matching qualifiers since if
2785 an instruction has both invalid tying and invalid qualifiers,
2786 the error about qualifiers would suggest several alternative
2787 instructions that also have invalid tying. */
2788 if (inst->operands[0].reg.regno
2789 != inst->operands[i].reg.regno)
2790 {
2791 if (mismatch_detail)
2792 {
2793 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2794 mismatch_detail->index = i;
2795 mismatch_detail->error = NULL;
2796 }
2797 return 0;
2798 }
2799 break;
2800 }
2801 }
2802
2803 /* Match the operands' qualifiers.
2804 *INST has already had qualifiers established for some, if not all, of
2805 its operands; we need to find out whether these established
2806 qualifiers match one of the qualifier sequences in
2807 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2808 the corresponding qualifier from that sequence.
2809 Only basic operand constraint checking is done here; the more thorough
2810 constraint checking is carried out by operand_general_constraint_met_p,
2811 which has to be called after this in order to get all of the operands'
2812 qualifiers established. */
2813 if (match_operands_qualifier (inst, true /* update_p */) == 0)
2814 {
2815 DEBUG_TRACE ("FAIL on operand qualifier matching");
2816 if (mismatch_detail)
2817 {
2818 /* Return an error type to indicate that it is the qualifier
2819 matching failure; we don't care about which operand as there
2820 is enough information in the opcode table to reproduce it. */
2821 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2822 mismatch_detail->index = -1;
2823 mismatch_detail->error = NULL;
2824 }
2825 return 0;
2826 }
2827
2828 /* Match operands' constraint. */
2829 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2830 {
2831 enum aarch64_opnd type = inst->opcode->operands[i];
2832 if (type == AARCH64_OPND_NIL)
2833 break;
2834 if (inst->operands[i].skip)
2835 {
2836 DEBUG_TRACE ("skip the incomplete operand %d", i);
2837 continue;
2838 }
2839 if (operand_general_constraint_met_p (inst->operands, i, type,
2840 inst->opcode, mismatch_detail) == 0)
2841 {
2842 DEBUG_TRACE ("FAIL on operand %d", i);
2843 return 0;
2844 }
2845 }
2846
2847 DEBUG_TRACE ("PASS");
2848
2849 return 1;
2850 }
2851
2852 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2853 Also updates the TYPE of each INST->OPERANDS with the corresponding
2854 value of OPCODE->OPERANDS.
2855
2856 Note that some operand qualifiers may need to be manually cleared by
2857 the caller before it further calls the aarch64_opcode_encode; by
2858 doing this, it helps the qualifier matching facilities work
2859 properly. */
2860
2861 const aarch64_opcode*
2862 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2863 {
2864 int i;
2865 const aarch64_opcode *old = inst->opcode;
2866
2867 inst->opcode = opcode;
2868
2869 /* Update the operand types. */
2870 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2871 {
2872 inst->operands[i].type = opcode->operands[i];
2873 if (opcode->operands[i] == AARCH64_OPND_NIL)
2874 break;
2875 }
2876
2877 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2878
2879 return old;
2880 }
2881
2882 int
2883 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2884 {
2885 int i;
2886 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2887 if (operands[i] == operand)
2888 return i;
2889 else if (operands[i] == AARCH64_OPND_NIL)
2890 break;
2891 return -1;
2892 }
2893
2894 /* R0...R30, followed by FOR31. */
2895 #define BANK(R, FOR31) \
2896 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2897 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2898 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2899 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2900 /* [0][0] 32-bit integer regs with sp Wn
2901 [0][1] 64-bit integer regs with sp Xn sf=1
2902 [1][0] 32-bit integer regs with #0 Wn
2903 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2904 static const char *int_reg[2][2][32] = {
2905 #define R32(X) "w" #X
2906 #define R64(X) "x" #X
2907 { BANK (R32, "wsp"), BANK (R64, "sp") },
2908 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2909 #undef R64
2910 #undef R32
2911 };
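/* For example, int_reg[0][1][31] is "sp", int_reg[1][0][31] is "wzr"
   and int_reg[1][1][5] is "x5".  */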
2912
2913 /* Names of the SVE vector registers, first with .S suffixes,
2914 then with .D suffixes. */
2915
2916 static const char *sve_reg[2][32] = {
2917 #define ZS(X) "z" #X ".s"
2918 #define ZD(X) "z" #X ".d"
2919 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2920 #undef ZD
2921 #undef ZS
2922 };
2923 #undef BANK
2924
2925 /* Return the integer register name.
2926 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2927
2928 static inline const char *
2929 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2930 {
2931 const int has_zr = sp_reg_p ? 0 : 1;
2932 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2933 return int_reg[has_zr][is_64][regno];
2934 }
2935
2936 /* Like get_int_reg_name, but IS_64 is always 1. */
2937
2938 static inline const char *
2939 get_64bit_int_reg_name (int regno, int sp_reg_p)
2940 {
2941 const int has_zr = sp_reg_p ? 0 : 1;
2942 return int_reg[has_zr][1][regno];
2943 }
2944
2945 /* Get the name of the integer offset register in OPND, using the shift type
2946 to decide whether it's a word or doubleword. */
2947
2948 static inline const char *
2949 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2950 {
2951 switch (opnd->shifter.kind)
2952 {
2953 case AARCH64_MOD_UXTW:
2954 case AARCH64_MOD_SXTW:
2955 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2956
2957 case AARCH64_MOD_LSL:
2958 case AARCH64_MOD_SXTX:
2959 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2960
2961 default:
2962 abort ();
2963 }
2964 }
2965
2966 /* Get the name of the SVE vector offset register in OPND, using the operand
2967 qualifier to decide whether the suffix should be .S or .D. */
2968
2969 static inline const char *
2970 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2971 {
2972 assert (qualifier == AARCH64_OPND_QLF_S_S
2973 || qualifier == AARCH64_OPND_QLF_S_D);
2974 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2975 }
2976
2977 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2978
2979 typedef union
2980 {
2981 uint64_t i;
2982 double d;
2983 } double_conv_t;
2984
2985 typedef union
2986 {
2987 uint32_t i;
2988 float f;
2989 } single_conv_t;
2990
2991 typedef union
2992 {
2993 uint32_t i;
2994 float f;
2995 } half_conv_t;
2996
2997 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2998 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2999 (depending on the type of the instruction). IMM8 will be expanded to a
3000 single-precision floating-point value (SIZE == 4) or a double-precision
3001 floating-point value (SIZE == 8). A half-precision floating-point value
3002 (SIZE == 2) is expanded to a single-precision floating-point value. The
3003 expanded value is returned. */
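/* For example, with SIZE == 4, IMM8 == 0x70 has imm8<7> == 0,
   imm8<6> == 1 and imm8<5:0> == 0x30, so the expansion below yields
   0x3f800000, i.e. single-precision 1.0.  */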
3004
3005 static uint64_t
3006 expand_fp_imm (int size, uint32_t imm8)
3007 {
3008 uint64_t imm = 0;
3009 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3010
3011 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
3012 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
3013 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
3014 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3015 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
3016 if (size == 8)
3017 {
3018 imm = (imm8_7 << (63-32)) /* imm8<7> */
3019 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
3020 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3021 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3022 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
3023 imm <<= 32;
3024 }
3025 else if (size == 4 || size == 2)
3026 {
3027 imm = (imm8_7 << 31) /* imm8<7> */
3028 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
3029 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
3030 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
3031 }
3032 else
3033 {
3034 /* An unsupported size. */
3035 assert (0);
3036 }
3037
3038 return imm;
3039 }
3040
3041 /* Produce the string representation of the register list operand *OPND
3042 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3043 the register name that comes before the register number, such as "v". */
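/* For example, a list of four registers starting at v0 with a 16B
   qualifier is printed as "{v0.16b-v3.16b}", while a two-register list
   starting at v31 wraps around and is printed as "{v31.16b, v0.16b}".  */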
3044 static void
3045 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3046 const char *prefix)
3047 {
3048 const int num_regs = opnd->reglist.num_regs;
3049 const int first_reg = opnd->reglist.first_regno;
3050 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3051 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3052 char tb[8]; /* Temporary buffer. */
3053
3054 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3055 assert (num_regs >= 1 && num_regs <= 4);
3056
3057 /* Prepare the index if any. */
3058 if (opnd->reglist.has_index)
3059 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3060 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3061 else
3062 tb[0] = '\0';
3063
3064 /* The hyphenated form is preferred for disassembly if there are
3065 more than two registers in the list, and the register numbers
3066 are monotonically increasing in increments of one. */
3067 if (num_regs > 2 && last_reg > first_reg)
3068 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3069 prefix, last_reg, qlf_name, tb);
3070 else
3071 {
3072 const int reg0 = first_reg;
3073 const int reg1 = (first_reg + 1) & 0x1f;
3074 const int reg2 = (first_reg + 2) & 0x1f;
3075 const int reg3 = (first_reg + 3) & 0x1f;
3076
3077 switch (num_regs)
3078 {
3079 case 1:
3080 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3081 break;
3082 case 2:
3083 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3084 prefix, reg1, qlf_name, tb);
3085 break;
3086 case 3:
3087 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3088 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3089 prefix, reg2, qlf_name, tb);
3090 break;
3091 case 4:
3092 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3093 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3094 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3095 break;
3096 }
3097 }
3098 }
3099
3100 /* Print the register+immediate address in OPND to BUF, which has SIZE
3101 characters. BASE is the name of the base register. */
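/* For example: pre-indexed "[x0, #16]!", post-indexed "[x0], #16",
   a plain offset "[x0, #16]" or "[x0]", and for SVE forms
   "[x0, #2, mul vl]".  */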
3102
3103 static void
3104 print_immediate_offset_address (char *buf, size_t size,
3105 const aarch64_opnd_info *opnd,
3106 const char *base)
3107 {
3108 if (opnd->addr.writeback)
3109 {
3110 if (opnd->addr.preind)
3111 {
3112 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3113 snprintf (buf, size, "[%s]!", base);
3114 else
3115 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3116 }
3117 else
3118 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3119 }
3120 else
3121 {
3122 if (opnd->shifter.operator_present)
3123 {
3124 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3125 snprintf (buf, size, "[%s, #%d, mul vl]",
3126 base, opnd->addr.offset.imm);
3127 }
3128 else if (opnd->addr.offset.imm)
3129 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3130 else
3131 snprintf (buf, size, "[%s]", base);
3132 }
3133 }
3134
3135 /* Produce the string representation of the register offset address operand
3136 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3137 the names of the base and offset registers. */
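/* For example: "[x0, x1]", "[x0, w1, uxtw #2]" or, when the shift amount
   is zero and can be omitted, "[x0, w1, sxtw]".  */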
3138 static void
3139 print_register_offset_address (char *buf, size_t size,
3140 const aarch64_opnd_info *opnd,
3141 const char *base, const char *offset)
3142 {
3143 char tb[16]; /* Temporary buffer. */
3144 bool print_extend_p = true;
3145 bool print_amount_p = true;
3146 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3147
3148 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3149 || !opnd->shifter.amount_present))
3150 {
3151 /* Don't print the shift/extend amount when the amount is zero and
3152 it is not the special case of an 8-bit load/store instruction. */
3153 print_amount_p = false;
3154 /* Likewise, no need to print the shift operator LSL in such a
3155 situation. */
3156 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3157 print_extend_p = false;
3158 }
3159
3160 /* Prepare for the extend/shift. */
3161 if (print_extend_p)
3162 {
3163 if (print_amount_p)
3164 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3165 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3166 (opnd->shifter.amount % 100));
3167 else
3168 snprintf (tb, sizeof (tb), ", %s", shift_name);
3169 }
3170 else
3171 tb[0] = '\0';
3172
3173 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3174 }
3175
3176 /* Print ZA tiles from imm8 in ZERO instruction.
3177
3178 The preferred disassembly of this instruction uses the shortest list of tile
3179 names that represent the encoded immediate mask.
3180
3181 For example:
3182 * An all-ones immediate is disassembled as {ZA}.
3183 * An all-zeros immediate is disassembled as an empty list { }.
3184 */
3185 static void
3186 print_sme_za_list (char *buf, size_t size, int mask)
3187 {
3188 const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
3189 "za1.s", "za2.s", "za3.s", "za0.d",
3190 "za1.d", "za2.d", "za3.d", "za4.d",
3191 "za5.d", "za6.d", "za7.d", " " };
3192 const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3193 0x22, 0x44, 0x88, 0x01,
3194 0x02, 0x04, 0x08, 0x10,
3195 0x20, 0x40, 0x80, 0x00 };
3196 int i, k;
3197 const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
3198
3199 k = snprintf (buf, size, "{");
3200 for (i = 0; i < ZAN_SIZE; i++)
3201 {
3202 if ((mask & zan_v[i]) == zan_v[i])
3203 {
3204 mask &= ~zan_v[i];
3205 if (k > 1)
3206 k += snprintf (buf + k, size - k, ", %s", zan[i]);
3207 else
3208 k += snprintf (buf + k, size - k, "%s", zan[i]);
3209 }
3210 if (mask == 0)
3211 break;
3212 }
3213 snprintf (buf + k, size - k, "}");
3214 }
3215
3216 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3217 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3218 PC, PCREL_P and ADDRESS are used to pass in and return information about
3219 the PC-relative address calculation, where the PC value is passed in
3220 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3221 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3222 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3223
3224 The function serves both the disassembler and the assembler diagnostics
3225 issuer, which is the reason why it lives in this file. */
3226
3227 void
3228 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3229 const aarch64_opcode *opcode,
3230 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3231 bfd_vma *address, char** notes,
3232 char *comment, size_t comment_size,
3233 aarch64_feature_set features)
3234 {
3235 unsigned int i, num_conds;
3236 const char *name = NULL;
3237 const aarch64_opnd_info *opnd = opnds + idx;
3238 enum aarch64_modifier_kind kind;
3239 uint64_t addr, enum_value;
3240
3241 if (comment != NULL)
3242 {
3243 assert (comment_size > 0);
3244 comment[0] = '\0';
3245 }
3246 else
3247 assert (comment_size == 0);
3248
3249 buf[0] = '\0';
3250 if (pcrel_p)
3251 *pcrel_p = 0;
3252
3253 switch (opnd->type)
3254 {
3255 case AARCH64_OPND_Rd:
3256 case AARCH64_OPND_Rn:
3257 case AARCH64_OPND_Rm:
3258 case AARCH64_OPND_Rt:
3259 case AARCH64_OPND_Rt2:
3260 case AARCH64_OPND_Rs:
3261 case AARCH64_OPND_Ra:
3262 case AARCH64_OPND_Rt_LS64:
3263 case AARCH64_OPND_Rt_SYS:
3264 case AARCH64_OPND_PAIRREG:
3265 case AARCH64_OPND_SVE_Rm:
3266 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3267 the <ic_op>, therefore we use opnd->present to override the
3268 generic optional-ness information. */
3269 if (opnd->type == AARCH64_OPND_Rt_SYS)
3270 {
3271 if (!opnd->present)
3272 break;
3273 }
3274 /* Omit the operand, e.g. RET. */
3275 else if (optional_operand_p (opcode, idx)
3276 && (opnd->reg.regno
3277 == get_optional_operand_default_value (opcode)))
3278 break;
3279 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3280 || opnd->qualifier == AARCH64_OPND_QLF_X);
3281 snprintf (buf, size, "%s",
3282 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3283 break;
3284
3285 case AARCH64_OPND_Rd_SP:
3286 case AARCH64_OPND_Rn_SP:
3287 case AARCH64_OPND_Rt_SP:
3288 case AARCH64_OPND_SVE_Rn_SP:
3289 case AARCH64_OPND_Rm_SP:
3290 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3291 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3292 || opnd->qualifier == AARCH64_OPND_QLF_X
3293 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3294 snprintf (buf, size, "%s",
3295 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3296 break;
3297
3298 case AARCH64_OPND_Rm_EXT:
3299 kind = opnd->shifter.kind;
3300 assert (idx == 1 || idx == 2);
3301 if ((aarch64_stack_pointer_p (opnds)
3302 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3303 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3304 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3305 && kind == AARCH64_MOD_UXTW)
3306 || (opnd->qualifier == AARCH64_OPND_QLF_X
3307 && kind == AARCH64_MOD_UXTX)))
3308 {
3309 /* 'LSL' is the preferred form in this case. */
3310 kind = AARCH64_MOD_LSL;
3311 if (opnd->shifter.amount == 0)
3312 {
3313 /* Shifter omitted. */
3314 snprintf (buf, size, "%s",
3315 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3316 break;
3317 }
3318 }
3319 if (opnd->shifter.amount)
3320 snprintf (buf, size, "%s, %s #%" PRIi64,
3321 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3322 aarch64_operand_modifiers[kind].name,
3323 opnd->shifter.amount);
3324 else
3325 snprintf (buf, size, "%s, %s",
3326 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3327 aarch64_operand_modifiers[kind].name);
3328 break;
3329
3330 case AARCH64_OPND_Rm_SFT:
3331 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3332 || opnd->qualifier == AARCH64_OPND_QLF_X);
3333 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3334 snprintf (buf, size, "%s",
3335 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3336 else
3337 snprintf (buf, size, "%s, %s #%" PRIi64,
3338 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3339 aarch64_operand_modifiers[opnd->shifter.kind].name,
3340 opnd->shifter.amount);
3341 break;
3342
3343 case AARCH64_OPND_Fd:
3344 case AARCH64_OPND_Fn:
3345 case AARCH64_OPND_Fm:
3346 case AARCH64_OPND_Fa:
3347 case AARCH64_OPND_Ft:
3348 case AARCH64_OPND_Ft2:
3349 case AARCH64_OPND_Sd:
3350 case AARCH64_OPND_Sn:
3351 case AARCH64_OPND_Sm:
3352 case AARCH64_OPND_SVE_VZn:
3353 case AARCH64_OPND_SVE_Vd:
3354 case AARCH64_OPND_SVE_Vm:
3355 case AARCH64_OPND_SVE_Vn:
3356 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3357 opnd->reg.regno);
3358 break;
3359
3360 case AARCH64_OPND_Va:
3361 case AARCH64_OPND_Vd:
3362 case AARCH64_OPND_Vn:
3363 case AARCH64_OPND_Vm:
3364 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3365 aarch64_get_qualifier_name (opnd->qualifier));
3366 break;
3367
3368 case AARCH64_OPND_Ed:
3369 case AARCH64_OPND_En:
3370 case AARCH64_OPND_Em:
3371 case AARCH64_OPND_Em16:
3372 case AARCH64_OPND_SM3_IMM2:
3373 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3374 aarch64_get_qualifier_name (opnd->qualifier),
3375 opnd->reglane.index);
3376 break;
3377
3378 case AARCH64_OPND_VdD1:
3379 case AARCH64_OPND_VnD1:
3380 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3381 break;
3382
3383 case AARCH64_OPND_LVn:
3384 case AARCH64_OPND_LVt:
3385 case AARCH64_OPND_LVt_AL:
3386 case AARCH64_OPND_LEt:
3387 print_register_list (buf, size, opnd, "v");
3388 break;
3389
3390 case AARCH64_OPND_SVE_Pd:
3391 case AARCH64_OPND_SVE_Pg3:
3392 case AARCH64_OPND_SVE_Pg4_5:
3393 case AARCH64_OPND_SVE_Pg4_10:
3394 case AARCH64_OPND_SVE_Pg4_16:
3395 case AARCH64_OPND_SVE_Pm:
3396 case AARCH64_OPND_SVE_Pn:
3397 case AARCH64_OPND_SVE_Pt:
3398 case AARCH64_OPND_SME_Pm:
3399 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3400 snprintf (buf, size, "p%d", opnd->reg.regno);
3401 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3402 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3403 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3404 aarch64_get_qualifier_name (opnd->qualifier));
3405 else
3406 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3407 aarch64_get_qualifier_name (opnd->qualifier));
3408 break;
3409
3410 case AARCH64_OPND_SVE_Za_5:
3411 case AARCH64_OPND_SVE_Za_16:
3412 case AARCH64_OPND_SVE_Zd:
3413 case AARCH64_OPND_SVE_Zm_5:
3414 case AARCH64_OPND_SVE_Zm_16:
3415 case AARCH64_OPND_SVE_Zn:
3416 case AARCH64_OPND_SVE_Zt:
3417 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3418 snprintf (buf, size, "z%d", opnd->reg.regno);
3419 else
3420 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3421 aarch64_get_qualifier_name (opnd->qualifier));
3422 break;
3423
3424 case AARCH64_OPND_SVE_ZnxN:
3425 case AARCH64_OPND_SVE_ZtxN:
3426 print_register_list (buf, size, opnd, "z");
3427 break;
3428
3429 case AARCH64_OPND_SVE_Zm3_INDEX:
3430 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3431 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3432 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3433 case AARCH64_OPND_SVE_Zm4_INDEX:
3434 case AARCH64_OPND_SVE_Zn_INDEX:
3435 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3436 aarch64_get_qualifier_name (opnd->qualifier),
3437 opnd->reglane.index);
3438 break;
3439
3440 case AARCH64_OPND_SME_ZAda_2b:
3441 case AARCH64_OPND_SME_ZAda_3b:
3442 snprintf (buf, size, "za%d.%s", opnd->reg.regno,
3443 aarch64_get_qualifier_name (opnd->qualifier));
3444 break;
3445
3446 case AARCH64_OPND_SME_ZA_HV_idx_src:
3447 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3448 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3449 snprintf (buf, size, "%sza%d%c.%s[w%d, %d]%s",
3450 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3451 opnd->za_tile_vector.regno,
3452 opnd->za_tile_vector.v == 1 ? 'v' : 'h',
3453 aarch64_get_qualifier_name (opnd->qualifier),
3454 opnd->za_tile_vector.index.regno,
3455 opnd->za_tile_vector.index.imm,
3456 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3457 break;
3458
3459 case AARCH64_OPND_SME_list_of_64bit_tiles:
3460 print_sme_za_list (buf, size, opnd->reg.regno);
3461 break;
3462
3463 case AARCH64_OPND_SME_ZA_array:
3464 snprintf (buf, size, "za[w%d, %d]",
3465 opnd->za_tile_vector.index.regno,
3466 opnd->za_tile_vector.index.imm);
3467 break;
3468
3469 case AARCH64_OPND_SME_SM_ZA:
3470 snprintf (buf, size, "%s", opnd->reg.regno == 's' ? "sm" : "za");
3471 break;
3472
3473 case AARCH64_OPND_SME_PnT_Wm_imm:
3474 snprintf (buf, size, "p%d.%s[w%d, %d]",
3475 opnd->za_tile_vector.regno,
3476 aarch64_get_qualifier_name (opnd->qualifier),
3477 opnd->za_tile_vector.index.regno,
3478 opnd->za_tile_vector.index.imm);
3479 break;
3480
3481 case AARCH64_OPND_CRn:
3482 case AARCH64_OPND_CRm:
3483 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3484 break;
3485
3486 case AARCH64_OPND_IDX:
3487 case AARCH64_OPND_MASK:
3488 case AARCH64_OPND_IMM:
3489 case AARCH64_OPND_IMM_2:
3490 case AARCH64_OPND_WIDTH:
3491 case AARCH64_OPND_UIMM3_OP1:
3492 case AARCH64_OPND_UIMM3_OP2:
3493 case AARCH64_OPND_BIT_NUM:
3494 case AARCH64_OPND_IMM_VLSL:
3495 case AARCH64_OPND_IMM_VLSR:
3496 case AARCH64_OPND_SHLL_IMM:
3497 case AARCH64_OPND_IMM0:
3498 case AARCH64_OPND_IMMR:
3499 case AARCH64_OPND_IMMS:
3500 case AARCH64_OPND_UNDEFINED:
3501 case AARCH64_OPND_FBITS:
3502 case AARCH64_OPND_TME_UIMM16:
3503 case AARCH64_OPND_SIMM5:
3504 case AARCH64_OPND_SVE_SHLIMM_PRED:
3505 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3506 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3507 case AARCH64_OPND_SVE_SHRIMM_PRED:
3508 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3509 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3510 case AARCH64_OPND_SVE_SIMM5:
3511 case AARCH64_OPND_SVE_SIMM5B:
3512 case AARCH64_OPND_SVE_SIMM6:
3513 case AARCH64_OPND_SVE_SIMM8:
3514 case AARCH64_OPND_SVE_UIMM3:
3515 case AARCH64_OPND_SVE_UIMM7:
3516 case AARCH64_OPND_SVE_UIMM8:
3517 case AARCH64_OPND_SVE_UIMM8_53:
3518 case AARCH64_OPND_IMM_ROT1:
3519 case AARCH64_OPND_IMM_ROT2:
3520 case AARCH64_OPND_IMM_ROT3:
3521 case AARCH64_OPND_SVE_IMM_ROT1:
3522 case AARCH64_OPND_SVE_IMM_ROT2:
3523 case AARCH64_OPND_SVE_IMM_ROT3:
3524 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3525 break;
3526
3527 case AARCH64_OPND_SVE_I1_HALF_ONE:
3528 case AARCH64_OPND_SVE_I1_HALF_TWO:
3529 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3530 {
3531 single_conv_t c;
3532 c.i = opnd->imm.value;
3533 snprintf (buf, size, "#%.1f", c.f);
3534 break;
3535 }
3536
3537 case AARCH64_OPND_SVE_PATTERN:
3538 if (optional_operand_p (opcode, idx)
3539 && opnd->imm.value == get_optional_operand_default_value (opcode))
3540 break;
3541 enum_value = opnd->imm.value;
3542 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3543 if (aarch64_sve_pattern_array[enum_value])
3544 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3545 else
3546 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3547 break;
3548
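    /* Example (added for illustration, not in the original source): a
       pattern operand with an explicit multiplier, e.g. the "vl8, mul #3"
       in "cntd x0, vl8, mul #3", is handled by the case below: the pattern
       name is printed first and the ", mul #<imm>" suffix is appended only
       when the shifter operator is present.  */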
3549 case AARCH64_OPND_SVE_PATTERN_SCALED:
3550 if (optional_operand_p (opcode, idx)
3551 && !opnd->shifter.operator_present
3552 && opnd->imm.value == get_optional_operand_default_value (opcode))
3553 break;
3554 enum_value = opnd->imm.value;
3555 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3556 if (aarch64_sve_pattern_array[enum_value])
3557 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3558 else
3559 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3560 if (opnd->shifter.operator_present)
3561 {
3562 size_t len = strlen (buf);
3563 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3564 aarch64_operand_modifiers[opnd->shifter.kind].name,
3565 opnd->shifter.amount);
3566 }
3567 break;
3568
3569 case AARCH64_OPND_SVE_PRFOP:
3570 enum_value = opnd->imm.value;
3571 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3572 if (aarch64_sve_prfop_array[enum_value])
3573 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3574 else
3575 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3576 break;
3577
3578 case AARCH64_OPND_IMM_MOV:
3579 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3580 {
3581 case 4: /* e.g. MOV Wd, #<imm32>. */
3582 {
3583 int imm32 = opnd->imm.value;
3584 snprintf (buf, size, "#0x%-20x", imm32);
3585 snprintf (comment, comment_size, "#%d", imm32);
3586 }
3587 break;
3588 case 8: /* e.g. MOV Xd, #<imm64>. */
3589 snprintf (buf, size, "#0x%-20" PRIx64, opnd->imm.value);
3590 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3591 break;
3592 default:
3593 snprintf (buf, size, "<invalid>");
3594 break;
3595 }
3596 break;
3597
3598 case AARCH64_OPND_FPIMM0:
3599 snprintf (buf, size, "#0.0");
3600 break;
3601
3602 case AARCH64_OPND_LIMM:
3603 case AARCH64_OPND_AIMM:
3604 case AARCH64_OPND_HALF:
3605 case AARCH64_OPND_SVE_INV_LIMM:
3606 case AARCH64_OPND_SVE_LIMM:
3607 case AARCH64_OPND_SVE_LIMM_MOV:
3608 if (opnd->shifter.amount)
3609 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3610 opnd->shifter.amount);
3611 else
3612 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3613 break;
3614
3615 case AARCH64_OPND_SIMD_IMM:
3616 case AARCH64_OPND_SIMD_IMM_SFT:
3617 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3618 || opnd->shifter.kind == AARCH64_MOD_NONE)
3619 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3620 else
3621 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3622 aarch64_operand_modifiers[opnd->shifter.kind].name,
3623 opnd->shifter.amount);
3624 break;
3625
3626 case AARCH64_OPND_SVE_AIMM:
3627 case AARCH64_OPND_SVE_ASIMM:
3628 if (opnd->shifter.amount)
3629 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3630 opnd->shifter.amount);
3631 else
3632 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3633 break;
3634
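    /* Example (added; relies on the standard AArch64 8-bit floating-point
       immediate expansion that expand_fp_imm implements): the encoding 0x70
       expands to 1.0, so the single-precision branch below would print it as
       "#1.000000000000000000e+00".  */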
3635 case AARCH64_OPND_FPIMM:
3636 case AARCH64_OPND_SIMD_FPIMM:
3637 case AARCH64_OPND_SVE_FPIMM8:
3638 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3639 {
3640 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3641 {
3642 half_conv_t c;
3643 c.i = expand_fp_imm (2, opnd->imm.value);
3644 snprintf (buf, size, "#%.18e", c.f);
3645 }
3646 break;
3647 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3648 {
3649 single_conv_t c;
3650 c.i = expand_fp_imm (4, opnd->imm.value);
3651 snprintf (buf, size, "#%.18e", c.f);
3652 }
3653 break;
3654 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3655 {
3656 double_conv_t c;
3657 c.i = expand_fp_imm (8, opnd->imm.value);
3658 snprintf (buf, size, "#%.18e", c.d);
3659 }
3660 break;
3661 default:
3662 snprintf (buf, size, "<invalid>");
3663 break;
3664 }
3665 break;
3666
3667 case AARCH64_OPND_CCMP_IMM:
3668 case AARCH64_OPND_NZCV:
3669 case AARCH64_OPND_EXCEPTION:
3670 case AARCH64_OPND_UIMM4:
3671 case AARCH64_OPND_UIMM4_ADDG:
3672 case AARCH64_OPND_UIMM7:
3673 case AARCH64_OPND_UIMM10:
3674 if (optional_operand_p (opcode, idx)
3675 && (opnd->imm.value ==
3676 (int64_t) get_optional_operand_default_value (opcode)))
3677 /* Omit the operand, e.g. DCPS1. */
3678 break;
3679 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3680 break;
3681
3682 case AARCH64_OPND_COND:
3683 case AARCH64_OPND_COND1:
3684 snprintf (buf, size, "%s", opnd->cond->names[0]);
3685 num_conds = ARRAY_SIZE (opnd->cond->names);
3686 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3687 {
3688 size_t len = comment != NULL ? strlen (comment) : 0;
3689 if (i == 1)
3690 snprintf (comment + len, comment_size - len, "%s = %s",
3691 opnd->cond->names[0], opnd->cond->names[i]);
3692 else
3693 snprintf (comment + len, comment_size - len, ", %s",
3694 opnd->cond->names[i]);
3695 }
3696 break;
3697
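    /* Worked example (added for illustration, assuming AARCH64_PCREL_OFFSET
       is zero for A64): an ADRP at pc 0x400010 with an immediate of 0x1000
       yields ((0x400010 & ~0xfff) + 0x1000) = 0x401000, which the case below
       prints as "#0x401000".  */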
3698 case AARCH64_OPND_ADDR_ADRP:
3699 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3700 + opnd->imm.value;
3701 if (pcrel_p)
3702 *pcrel_p = 1;
3703 if (address)
3704 *address = addr;
3705 /* This is not necessary during disassembly, as print_address_func
3706 in the disassemble_info will take care of the printing. But some
3707 other callers may still be interested in getting the string in the
3708 output buffer, so we do the snprintf here regardless. */
3709 snprintf (buf, size, "#0x%" PRIx64, addr);
3710 break;
3711
3712 case AARCH64_OPND_ADDR_PCREL14:
3713 case AARCH64_OPND_ADDR_PCREL19:
3714 case AARCH64_OPND_ADDR_PCREL21:
3715 case AARCH64_OPND_ADDR_PCREL26:
3716 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3717 if (pcrel_p)
3718 *pcrel_p = 1;
3719 if (address)
3720 *address = addr;
3721 /* This is not necessary during disassembly, as print_address_func
3722 in the disassemble_info will take care of the printing. But some
3723 other callers may still be interested in getting the string in the
3724 output buffer, so we do the snprintf here regardless. */
3725 snprintf (buf, size, "#0x%" PRIx64, addr);
3726 break;
3727
3728 case AARCH64_OPND_ADDR_SIMPLE:
3729 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3730 case AARCH64_OPND_SIMD_ADDR_POST:
3731 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3732 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3733 {
3734 if (opnd->addr.offset.is_reg)
3735 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3736 else
3737 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3738 }
3739 else
3740 snprintf (buf, size, "[%s]", name);
3741 break;
3742
3743 case AARCH64_OPND_ADDR_REGOFF:
3744 case AARCH64_OPND_SVE_ADDR_R:
3745 case AARCH64_OPND_SVE_ADDR_RR:
3746 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3747 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3748 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3749 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3750 case AARCH64_OPND_SVE_ADDR_RX:
3751 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3752 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3753 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3754 print_register_offset_address
3755 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3756 get_offset_int_reg_name (opnd));
3757 break;
3758
3759 case AARCH64_OPND_SVE_ADDR_ZX:
3760 print_register_offset_address
3761 (buf, size, opnd,
3762 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3763 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3764 break;
3765
3766 case AARCH64_OPND_SVE_ADDR_RZ:
3767 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3768 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3769 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3770 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3771 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3772 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3773 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3774 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3775 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3776 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3777 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3778 print_register_offset_address
3779 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3780 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3781 break;
3782
3783 case AARCH64_OPND_ADDR_SIMM7:
3784 case AARCH64_OPND_ADDR_SIMM9:
3785 case AARCH64_OPND_ADDR_SIMM9_2:
3786 case AARCH64_OPND_ADDR_SIMM10:
3787 case AARCH64_OPND_ADDR_SIMM11:
3788 case AARCH64_OPND_ADDR_SIMM13:
3789 case AARCH64_OPND_ADDR_OFFSET:
3790 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
3791 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3792 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3793 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3794 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3795 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3796 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3797 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3798 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3799 case AARCH64_OPND_SVE_ADDR_RI_U6:
3800 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3801 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3802 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3803 print_immediate_offset_address
3804 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3805 break;
3806
3807 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3808 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3809 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3810 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3811 print_immediate_offset_address
3812 (buf, size, opnd,
3813 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3814 break;
3815
3816 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3817 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3818 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3819 print_register_offset_address
3820 (buf, size, opnd,
3821 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3822 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3823 break;
3824
3825 case AARCH64_OPND_ADDR_UIMM12:
3826 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3827 if (opnd->addr.offset.imm)
3828 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3829 else
3830 snprintf (buf, size, "[%s]", name);
3831 break;
3832
3833 case AARCH64_OPND_SYSREG:
3834 for (i = 0; aarch64_sys_regs[i].name; ++i)
3835 {
3836 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3837
3838 bool exact_match
3839 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3840 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3841 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3842
3843 /* Try to find an exact match, but if that fails, return the first
3844 partial match that was found. */
3845 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3846 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3847 && (name == NULL || exact_match))
3848 {
3849 name = aarch64_sys_regs[i].name;
3850 if (exact_match)
3851 {
3852 if (notes)
3853 *notes = NULL;
3854 break;
3855 }
3856
3857 /* If we didn't match exactly, then the presence of a flag
3858 indicates what we did not want for this instruction; e.g. if
3859 F_REG_READ is set, we were actually looking for a write
3860 register. See aarch64_ext_sysreg. */
3861 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3862 *notes = _("reading from a write-only register");
3863 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3864 *notes = _("writing to a read-only register");
3865 }
3866 }
3867
3868 if (name)
3869 snprintf (buf, size, "%s", name);
3870 else
3871 {
3872 /* Implementation defined system register. */
3873 unsigned int value = opnd->sysreg.value;
3874 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3875 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3876 value & 0x7);
3877 }
3878 break;
3879
3880 case AARCH64_OPND_PSTATEFIELD:
3881 for (i = 0; aarch64_pstatefields[i].name; ++i)
3882 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3883 {
3884 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
3885 SVCRZA and SVCRSMZA. */
3886 uint32_t flags = aarch64_pstatefields[i].flags;
3887 if (flags & F_REG_IN_CRM
3888 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
3889 != PSTATE_DECODE_CRM (flags)))
3890 continue;
3891 break;
3892 }
3893 assert (aarch64_pstatefields[i].name);
3894 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3895 break;
3896
3897 case AARCH64_OPND_SYSREG_AT:
3898 case AARCH64_OPND_SYSREG_DC:
3899 case AARCH64_OPND_SYSREG_IC:
3900 case AARCH64_OPND_SYSREG_TLBI:
3901 case AARCH64_OPND_SYSREG_SR:
3902 snprintf (buf, size, "%s", opnd->sysins_op->name);
3903 break;
3904
3905 case AARCH64_OPND_BARRIER:
3906 case AARCH64_OPND_BARRIER_DSB_NXS:
3907 snprintf (buf, size, "%s", opnd->barrier->name);
3908 break;
3909
3910 case AARCH64_OPND_BARRIER_ISB:
3911 /* Operand can be omitted, e.g. in a plain ISB. */
3912 if (! optional_operand_p (opcode, idx)
3913 || (opnd->barrier->value
3914 != get_optional_operand_default_value (opcode)))
3915 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3916 break;
3917
3918 case AARCH64_OPND_PRFOP:
3919 if (opnd->prfop->name != NULL)
3920 snprintf (buf, size, "%s", opnd->prfop->name);
3921 else
3922 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3923 break;
3924
3925 case AARCH64_OPND_BARRIER_PSB:
3926 snprintf (buf, size, "csync");
3927 break;
3928
3929 case AARCH64_OPND_BTI_TARGET:
3930 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3931 snprintf (buf, size, "%s", opnd->hint_option->name);
3932 break;
3933
3934 case AARCH64_OPND_MOPS_ADDR_Rd:
3935 case AARCH64_OPND_MOPS_ADDR_Rs:
3936 snprintf (buf, size, "[%s]!",
3937 get_int_reg_name (opnd->reg.regno, AARCH64_OPND_QLF_X, 0));
3938 break;
3939
3940 case AARCH64_OPND_MOPS_WB_Rn:
3941 snprintf (buf, size, "%s!",
3942 get_int_reg_name (opnd->reg.regno, AARCH64_OPND_QLF_X, 0));
3943 break;
3944
3945 default:
3946 snprintf (buf, size, "<invalid>");
3947 break;
3948 }
3949 }
3950
3951 #define CPENC(op0,op1,crn,crm,op2) \
3952 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3953 /* For 3.9.3 Instructions for Accessing Special Purpose Registers. */
3954 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3955 /* For 3.9.10 System Instructions. */
3956 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
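/* Worked example (added for illustration, not in the original source):
   for tpidr_el0 the MRS/MSR operand fields are op0=3, op1=3, CRn=C13,
   CRm=C0, op2=2, so

     CPENC (3,3,C13,C0,2)
       = ((3 << 19) | (3 << 16) | (13 << 12) | (0 << 8) | (2 << 5)) >> 5
       = 0x1bd040 >> 5
       = 0xde82.

   The fields end up packed contiguously as op0:op1:CRn:CRm:op2 with op2
   in the low three bits, which is why the implementation-defined
   fallback in aarch64_print_operand above recovers them with
   (value >> 14) & 0x3, (value >> 11) & 0x7, (value >> 7) & 0xf,
   (value >> 3) & 0xf and value & 0x7.  */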
3957
3958 #define C0 0
3959 #define C1 1
3960 #define C2 2
3961 #define C3 3
3962 #define C4 4
3963 #define C5 5
3964 #define C6 6
3965 #define C7 7
3966 #define C8 8
3967 #define C9 9
3968 #define C10 10
3969 #define C11 11
3970 #define C12 12
3971 #define C13 13
3972 #define C14 14
3973 #define C15 15
3974
3975 #define SYSREG(name, encoding, flags, features) \
3976 { name, encoding, flags, features }
3977
3978 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
3979
3980 #define SR_FEAT(n,e,f,feat) \
3981 SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
3982
3983 #define SR_FEAT2(n,e,f,fe1,fe2) \
3984 SYSREG ((n), (e), (f) | F_ARCHEXT, \
3985 AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
3986
3987 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
3988 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
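/* Illustrative expansion (added for clarity, not in the original source):
   the two-feature wrappers simply OR the architecture bits together, e.g.

     SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0)

   expands via SR_FEAT2 to

     SYSREG ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0 | F_ARCHEXT,
             AARCH64_FEATURE_V8_A | AARCH64_FEATURE_V8_1)

   so the entry carries both feature bits (plus F_ARCHEXT) for the
   availability check.  */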
3989
3990 #define SR_V8_A(n,e,f) SR_FEAT (n,e,f,V8_A)
3991 #define SR_V8_R(n,e,f) SR_FEAT (n,e,f,V8_R)
3992 #define SR_V8_1(n,e,f) SR_FEAT (n,e,f,V8_1)
3993 #define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
3994 #define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
3995 #define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
3996 #define SR_V8_6(n,e,f) SR_FEAT (n,e,f,V8_6)
3997 #define SR_V8_7(n,e,f) SR_FEAT (n,e,f,V8_7)
3998 #define SR_V8_8(n,e,f) SR_FEAT (n,e,f,V8_8)
3999 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4000 #define SR_GIC(n,e,f) SR_CORE (n,e,f)
4001 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4002 #define SR_AMU(n,e,f) SR_FEAT (n,e,f,V8_4)
4003 #define SR_LOR(n,e,f) SR_FEAT (n,e,f,LOR)
4004 #define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
4005 #define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
4006 #define SR_RNG(n,e,f) SR_FEAT (n,e,f,RNG)
4007 #define SR_SME(n,e,f) SR_FEAT (n,e,f,SME)
4008 #define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
4009 #define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
4010 #define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4011 #define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4012 #define SR_MEMTAG(n,e,f) SR_FEAT (n,e,f,MEMTAG)
4013 #define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4014
4015 #define SR_EXPAND_ELx(f,x) \
4016 f (x, 1), \
4017 f (x, 2), \
4018 f (x, 3), \
4019 f (x, 4), \
4020 f (x, 5), \
4021 f (x, 6), \
4022 f (x, 7), \
4023 f (x, 8), \
4024 f (x, 9), \
4025 f (x, 10), \
4026 f (x, 11), \
4027 f (x, 12), \
4028 f (x, 13), \
4029 f (x, 14), \
4030 f (x, 15),
4031
4032 #define SR_EXPAND_EL12(f) \
4033 SR_EXPAND_ELx (f,1) \
4034 SR_EXPAND_ELx (f,2)
4035
4036 /* TODO: one more issue still needs to be resolved:
4037 1. handle cpu-implementation-defined system registers.
4038 
4039 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4040 respectively. If neither of these is set then the register is read-write. */
4041 const aarch64_sys_reg aarch64_sys_regs [] =
4042 {
4043 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4044 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4045 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4046 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4047 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4048 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4049 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4050 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4051 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4052 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4053 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4054 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4055 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4056 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4057 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4058 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4059 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4060 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4061 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4062 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4063 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4064 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4065 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4066 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4067 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4068 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4069 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4070 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4071 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4072 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4073 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4074 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4075 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4076 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4077 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4078 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4079 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4080 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4081 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4082 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4083 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4084 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4085 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4086 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4087 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4088 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4089 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4090 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4091 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4092 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4093 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4094 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4095 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4096 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4097 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4098 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4099 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4100 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4101 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4102 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4103 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4104 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4105 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4106 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4107 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4108 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4109 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4110 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4111 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4112 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4113 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4114 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4115 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4116 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4117 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4118 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4119 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4120 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4121 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4122 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4123 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4124 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4125 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4126 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4127 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4128 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4129 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4130 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4131 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4132 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4133 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4134 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4135 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4136 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4137 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4138 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4139 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4140 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4141 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4142 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4143 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4144 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4145 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4146 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4147 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4148 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4149 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4150 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4151 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4152 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4153 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4154 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4155 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4156 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4157 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4158 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4159 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4160 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4161 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4162 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4163 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4164 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4165 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4166 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4167 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4168 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4169 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4170 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4171 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4172 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4173 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4174 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4175 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4176 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4177 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4178 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4179 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4180 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4181 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4182 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4183 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4184 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4185 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4186 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4187 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4188 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4189 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4190 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4191 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4192 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4193 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4194 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4195 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4196 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4197 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4198 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4199 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4200 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4201 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4202 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4203 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4204 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4205 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4206 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4207 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4208 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4209 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4210 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4211 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4212 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4213 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4214 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4215 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4216 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4217 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4218 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4219 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4220 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4221 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4222 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4223 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4224 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4225 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4226 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4227 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4228 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4229 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4230 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4231 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4232 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4233 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4234 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4235 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4236 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4237 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4238 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4239 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4240 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4241 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4242 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4243 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4244 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4245 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4246 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4247 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4248 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4249 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4250 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4251 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4252 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4253 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4254 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4255 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4256 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4257 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4258 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4259 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4260 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4261 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4262 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4263 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4264 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4265 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4266 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4267 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4268 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4269 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4270 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4271 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4272 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4273 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4274 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4275 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4276 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4277 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4278 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4279 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4280 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4281 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4282 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4283 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4284 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4285 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4286 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4287 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4288 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4289 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4290 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4291 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4292 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4293 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4294 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4295 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4296 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4297 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4298 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4299 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4300 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4301 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4302 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4303 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4304 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4305 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4306 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4307 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4308 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4309 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4310 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4311 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4312 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4313 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4314 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4315 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4316 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4317 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4318 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4319 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4320 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4321 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4322 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4323 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4324 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4325 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4326 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4327 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4328 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4329 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4330 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4331 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4332 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4333 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4334 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4335 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4336 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4337 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4338 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4339 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4340 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4341 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4342 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4343 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4344 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4345 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4346 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4347 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4348 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4349 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4350 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4351 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4352 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4353 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4354 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4355 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4356 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4357 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4358 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4359 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4360 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4361 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4362 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4363 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4364 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4365 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4366 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4367 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4368 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4369 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4370 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4371 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4372 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4373 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4374 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4375 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4376 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4377 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4378 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4379 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4380 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4381 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4382 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4383 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4384 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4385 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4386 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4387 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4388 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4389 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4390 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4391 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4392 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4393 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4394 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4395 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4396 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4397 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4398 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4399 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4400 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4401 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4402 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4403 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4404 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4405 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4406 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4407 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4408 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4409 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4410 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4411 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4412 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4413 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4414 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4415 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4416 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4417 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4418 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4419 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4420 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4421 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4422 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4423 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4424 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4425 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4426 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4427 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4428 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4429 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4430 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4431 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4432 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4433 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4434 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4435 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4436 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4437 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4438 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4439 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4440 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4441 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4442 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4443 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4444 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4445 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4446
4447 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4448 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4449 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4450 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4451 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4452 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4453 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4454 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4455 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4456 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4457 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4458 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4459 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4460 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4461 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4462
4463 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4464 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4465 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4466 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4467 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4468 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4469 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4470 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4471 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4472 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4473 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4474 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4475 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4476 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4477 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4478 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4479
4480 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4481 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4482 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4483 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4484
4485 #define ENC_BARLAR(x,n,lar) \
4486 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4487
4488 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4489 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4490
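/* Illustrative note (added, not in the original source): SR_EXPAND_EL12
   applies the given macro for n = 1..15 at EL1 and EL2, so the expansion
   below generates the thirty entries prbar1_el1 ... prbar15_el1 and
   prbar1_el2 ... prbar15_el2.  For instance PRBARn_ELx (1,2) becomes

     SR_V8_R ("prbar2_el1", ENC_BARLAR (1,2,0), 0)

   where ENC_BARLAR (1,2,0) = CPENC (3, 0, C6, 9, 0), i.e. op1 = (x-1)*4,
   CRm = 8 + n/2 and op2 = (n & 1) * 4 + lar.  */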
4491 SR_EXPAND_EL12 (PRBARn_ELx)
4492 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4493 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4494 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4495 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4496 SR_EXPAND_EL12 (PRLARn_ELx)
4497 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4498 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4499 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4500
4501 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4502 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4503 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4504 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4505 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4506 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4507 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4508
4509 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4510 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4511 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4512 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4513 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4514 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4515 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4516 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4517 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4518 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4519 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4520 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4521 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4522 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4523 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4524 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4525 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4526 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4527 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4528 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4529 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4530 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4531 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4532 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4533 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4534 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4535 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4536 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4537 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4538 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4539 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4540 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4541 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4542 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4543 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4544 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4545 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4546 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4547 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4548 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4549 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4550 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4551 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4552 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4553 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4554 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4555 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4556 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4557 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4558 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4559 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4560 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4561 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4562 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4563 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4564 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4565 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4566 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4567 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4568 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4569 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4570 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4571 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4572 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4573 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4574 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4575 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4576 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4577 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4578 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4579 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4580 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4581 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4582 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4583 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4584 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4585 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4586 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4587 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4588 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4589 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4590 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4591 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4592 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4593 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4594 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4595 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4596 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4597 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4598 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4599 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4600 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4601 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4602 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4603 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4604 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4605 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4606 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4607 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4608 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4609 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4610 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4611 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4612 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4613 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4614 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4615 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4616 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4617 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4618 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4619 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4620 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4621 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4622 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4623 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4624 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4625 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4626 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4627 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4628 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4629 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4630 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4631 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4632 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4633 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4634 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4635 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4636 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4637 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4638 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4639 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4640 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4641 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4642 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4643 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4644 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4645 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4646 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4647 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4648 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4649 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4650 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4651 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4652 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4653 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4654 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4655 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4656 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4657 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4658 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4659 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4660 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4661 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4662 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4663 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4664 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4665 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4666 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4667 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4668 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4669 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4670 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4671 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4672 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4673 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4674 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4675 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4676 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4677 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4678 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4679 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4680 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4681 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4682 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4683 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4684 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4685 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4686 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4687 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4688 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4689 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4690 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4691 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4692 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4693 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4694 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4695 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4696 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4697 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4698 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4699 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4700 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4701 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4702 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4703 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4704 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4705 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4706 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4707 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4708 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4709 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4710 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4711 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4712 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4713 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4714 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4715 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4716 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4717 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4718 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4719 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4720 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4721 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4722 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4723 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4724
4725 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4726 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4727 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4728 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4729 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4730 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4731 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4732 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4733 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4734 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4735 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4736 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4737
4738 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
4739 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4740 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4741 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4742 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4743
4744 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4745 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4746 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4747 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4748 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4749
4750 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4751 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4752 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4753 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4754 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4755 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4756 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4757 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4758 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4759 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4760 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4761 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4762 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4763 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4764 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4765 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4766 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4767 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4768 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4769 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4770 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4771 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4772 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4773 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4774 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4775 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4776 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4777 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4778 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4779 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4780 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4781 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4782 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4783 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4784 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4785 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4786 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4787 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4788 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4789 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4790 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4791 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4792 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4793 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4794 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4795 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4796 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4797 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4798 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4799 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4800 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4801 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4802 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4803 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4804 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4805 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4806 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4807 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4808 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4809 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4810 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4811 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4812 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4813 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4814 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4815 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4816 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4817 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4818 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4819 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4820 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4821 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4822 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4823 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4824 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4825 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4826 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4827 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4828 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4829 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4830 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4831 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4832 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4833 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4834 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4835 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4836 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4837 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4838 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4839 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4840 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4841 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4842 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4843 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4844 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
4845 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
4846 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
4847 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
4848 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
4849 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
4850 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
4851 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
4852 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
4853 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
4854 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
4855
4856 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
4857
4858 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
4859 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
4860 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
4861
4862 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
4863 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
4864 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
4865 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
4866 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
4867 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
4868 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
4869 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
4870 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
4871 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
4872 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
4873
4874 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
4875 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
4876 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
4877 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
4878 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
4879 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
4880 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
4881 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
4882 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
4883 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
4884 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
4885 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
4886 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
4887 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
4888 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
4889 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
4890 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
4891 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
4892 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
4893 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
4894 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
4895 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
4896 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
4897 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
4898 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
4899 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
4900 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
4901 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
4902 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
4903 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
4904 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
4905 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
4906 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
4907 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
4908 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
4909 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
4910 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
4911 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
4912 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
4913 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
4914 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
4915 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
4916 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
4917 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
4918 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
4919 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
4920 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
4921 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
4922
4923 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
4924 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
4925 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
4926 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
4927 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
4928 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
4929 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
4930 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
4931 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
4932 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
4933 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
4934 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
4935 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
4936 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
4937 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
4938 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
4939 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
4940 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
4941 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
4942 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
4943 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
4944 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
4945 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
4946 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
4947 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
4948 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
4949 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
4950 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
4951 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
4952 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
4953 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
4954 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
4955 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
4956 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
4957 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
4958 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
4959 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
4960 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
4961 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
4962 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
4963 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
4964 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
4965 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
4966 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
4967 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
4968 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
4969 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
4970 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
4971 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
4972 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
4973 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
4974 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
4975 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
4976 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
4977 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
4978
4979 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
4980 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
4981 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
4982 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
4983 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
4984 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
4985 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
4986 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
4987 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
4988 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
4989 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
4990 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
4991 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
4992 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
4993 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
4994 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
4995 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
4996 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
4997 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
4998 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
4999 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5000 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5001 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5002 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5003 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5004 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5005 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5006 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5007 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5008 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5009 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5010 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5011 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5012 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5013 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5014 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5015 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5016 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5017 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5018 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5019 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5020 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5021
5022 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5023 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5024
5025 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5026 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5027
5028 { 0, CPENC (0,0,0,0,0), 0, 0 }
5029 };
5030
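/* Illustrative note (not part of the table): each CPENC entry corresponds to
   the generic S<op0>_<op1>_C<n>_C<m>_<op2> system-register form accepted by
   MSR/MRS, so the "accdata_el1" entry above, CPENC (3,0,C13,C0,5), names the
   same register as the generic spelling, e.g.:

       mrs  x0, accdata_el1        // same register as: mrs x0, s3_0_c13_c0_5

   The bit packing itself is done by the CPENC macro defined earlier in this
   file.  */
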
5031 bool
5032 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5033 {
5034 return (reg_flags & F_DEPRECATED) != 0;
5035 }
5036
5037 /* Using CPENC for the values below would be misleading: they are not in
5038    CPENC form.  They are in op1:op2 form (op1 in bits [5:3], op2 in bits
5039    [2:0]), as expected by ins_pstatefield, which simply shifts each field
5040    into place.  Running them through CPENC would keep only the first field
5041    and mask the rest to 0.  For example, op1 = 3, op2 = 2 ("dit"): CPENC
5042    would produce 0b110000000001000000 (0x30040), while the value wanted
5043    here is 0b011010 (0x1a).  See the worked example after the table below. */
5044 const aarch64_sys_reg aarch64_pstatefields [] =
5045 {
5046 SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
5047 SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
5048 SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
5049 SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
5050 SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
5051 SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
5052 SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
5053 SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
5054 SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
5055 | F_REG_MAX_VALUE (1)),
5056 SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
5057 | F_REG_MAX_VALUE (1)),
5058 SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
5059 | F_REG_MAX_VALUE (1)),
5060 SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
5061 { 0, CPENC (0,0,0,0,0), 0, 0 },
5062 };
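
/* Worked example for the comment before the table above (illustrative only):
   the values pack op1 and op2 as (op1 << 3) | op2, which is the form
   ins_pstatefield expects:

       op1 = 0, op2 = 5  =>  (0 << 3) | 5 = 0x05   ("spsel")
       op1 = 3, op2 = 6  =>  (3 << 3) | 6 = 0x1e   ("daifset")
       op1 = 3, op2 = 2  =>  (3 << 3) | 2 = 0x1a   ("dit")

   Running the same fields through CPENC instead would scatter them across the
   full system-register encoding, most of which ins_pstatefield masks away.  */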
5063
5064 bool
5065 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5066 const aarch64_sys_reg *reg)
5067 {
5068 if (!(reg->flags & F_ARCHEXT))
5069 return true;
5070
5071 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5072 }
5073
5074 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5075 {
5076 { "ialluis", CPENS(0,C7,C1,0), 0 },
5077 { "iallu", CPENS(0,C7,C5,0), 0 },
5078 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
5079 { 0, CPENS(0,0,0,0), 0 }
5080 };
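
/* Illustrative note: F_HASXT marks operations that take a GPR operand, so
   "ic ivau, x0" is valid while "ic ialluis" takes no register;
   aarch64_sys_ins_reg_has_xt below simply tests this flag.  */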
5081
5082 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5083 {
5084 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
5085 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
5086 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
5087 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
5088 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
5089 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
5090 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
5091 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
5092 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
5093 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
5094 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5095 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5096 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
5097 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5098 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5099 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
5100 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5101 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5102 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5103 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5104 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5105 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5106 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
5107 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5108 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5109 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
5110 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5111 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5112 { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
5113 { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
5114 { 0, CPENS(0,0,0,0), 0 }
5115 };
5116
5117 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5118 {
5119 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
5120 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
5121 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
5122 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
5123 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
5124 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
5125 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
5126 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
5127 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
5128 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
5129 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
5130 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
5131 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5132 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5133 { 0, CPENS(0,0,0,0), 0 }
5134 };
5135
5136 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5137 {
5138 { "vmalle1", CPENS(0,C8,C7,0), 0 },
5139 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
5140 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
5141 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
5142 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5143 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
5144 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
5145 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
5146 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5147 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5148 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
5149 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
5150 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
5151 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
5152 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5153 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5154 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
5155 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
5156 { "alle2", CPENS(4,C8,C7,0), 0 },
5157 { "alle2is", CPENS(4,C8,C3,0), 0 },
5158 { "alle1", CPENS(4,C8,C7,4), 0 },
5159 { "alle1is", CPENS(4,C8,C3,4), 0 },
5160 { "alle3", CPENS(6,C8,C7,0), 0 },
5161 { "alle3is", CPENS(6,C8,C3,0), 0 },
5162 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
5163 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
5164 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
5165 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
5166 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
5167 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
5168 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
5169 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
5170
5171 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
5172 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5173 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5174 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5175 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5176 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5177 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5178 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5179 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5180 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5181 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5182 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5183 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5184 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
5185 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
5186 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
5187
5188 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5189 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5190 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5191 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5192 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5193 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5194 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5195 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5196 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5197 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5198 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5199 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5200 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5201 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5202 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5203 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5204 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5205 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5206 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5207 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5208 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5209 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5210 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5211 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5212 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5213 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5214 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5215 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5216 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5217 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5218
5219 { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
5220 { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
5221 { "paallos", CPENS (6, C8, C1, 4), 0},
5222 { "paall", CPENS (6, C8, C7, 4), 0},
5223
5224 { 0, CPENS(0,0,0,0), 0 }
5225 };
5226
5227 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5228 {
5229   /* RCTX is somewhat unique in that its value (op2) differs depending on
5230      the instruction in which it is used (cfp/dvp/cpp).  Thus op2 is masked
5231      out and instead encoded directly in the aarch64_opcode_table entries
5232      for the respective instructions. */
5233 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5234
5235 { 0, CPENS(0,0,0,0), 0 }
5236 };
5237
5238 bool
5239 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5240 {
5241 return (sys_ins_reg->flags & F_HASXT) != 0;
5242 }
5243
5244 extern bool
5245 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5246 const char *reg_name,
5247 aarch64_insn reg_value,
5248 uint32_t reg_flags,
5249 aarch64_feature_set reg_features)
5250 {
5251 /* Armv8-R has no EL3. */
5252 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5253 {
5254 const char *suffix = strrchr (reg_name, '_');
5255 if (suffix && !strcmp (suffix, "_el3"))
5256 return false;
5257 }
5258
5259 if (!(reg_flags & F_ARCHEXT))
5260 return true;
5261
5262 if (reg_features
5263 && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5264 return true;
5265
5266 /* ARMv8.4 TLB instructions. */
5267 if ((reg_value == CPENS (0, C8, C1, 0)
5268 || reg_value == CPENS (0, C8, C1, 1)
5269 || reg_value == CPENS (0, C8, C1, 2)
5270 || reg_value == CPENS (0, C8, C1, 3)
5271 || reg_value == CPENS (0, C8, C1, 5)
5272 || reg_value == CPENS (0, C8, C1, 7)
5273 || reg_value == CPENS (4, C8, C4, 0)
5274 || reg_value == CPENS (4, C8, C4, 4)
5275 || reg_value == CPENS (4, C8, C1, 1)
5276 || reg_value == CPENS (4, C8, C1, 5)
5277 || reg_value == CPENS (4, C8, C1, 6)
5278 || reg_value == CPENS (6, C8, C1, 1)
5279 || reg_value == CPENS (6, C8, C1, 5)
5280 || reg_value == CPENS (4, C8, C1, 0)
5281 || reg_value == CPENS (4, C8, C1, 4)
5282 || reg_value == CPENS (6, C8, C1, 0)
5283 || reg_value == CPENS (0, C8, C6, 1)
5284 || reg_value == CPENS (0, C8, C6, 3)
5285 || reg_value == CPENS (0, C8, C6, 5)
5286 || reg_value == CPENS (0, C8, C6, 7)
5287 || reg_value == CPENS (0, C8, C2, 1)
5288 || reg_value == CPENS (0, C8, C2, 3)
5289 || reg_value == CPENS (0, C8, C2, 5)
5290 || reg_value == CPENS (0, C8, C2, 7)
5291 || reg_value == CPENS (0, C8, C5, 1)
5292 || reg_value == CPENS (0, C8, C5, 3)
5293 || reg_value == CPENS (0, C8, C5, 5)
5294 || reg_value == CPENS (0, C8, C5, 7)
5295 || reg_value == CPENS (4, C8, C0, 2)
5296 || reg_value == CPENS (4, C8, C0, 6)
5297 || reg_value == CPENS (4, C8, C4, 2)
5298 || reg_value == CPENS (4, C8, C4, 6)
5299 || reg_value == CPENS (4, C8, C4, 3)
5300 || reg_value == CPENS (4, C8, C4, 7)
5301 || reg_value == CPENS (4, C8, C6, 1)
5302 || reg_value == CPENS (4, C8, C6, 5)
5303 || reg_value == CPENS (4, C8, C2, 1)
5304 || reg_value == CPENS (4, C8, C2, 5)
5305 || reg_value == CPENS (4, C8, C5, 1)
5306 || reg_value == CPENS (4, C8, C5, 5)
5307 || reg_value == CPENS (6, C8, C6, 1)
5308 || reg_value == CPENS (6, C8, C6, 5)
5309 || reg_value == CPENS (6, C8, C2, 1)
5310 || reg_value == CPENS (6, C8, C2, 5)
5311 || reg_value == CPENS (6, C8, C5, 1)
5312 || reg_value == CPENS (6, C8, C5, 5))
5313 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5314 return true;
5315
5316 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
5317 if (reg_value == CPENS (3, C7, C12, 1)
5318 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5319 return true;
5320
5321 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5322 if (reg_value == CPENS (3, C7, C13, 1)
5323 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5324 return true;
5325
5326 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5327 if ((reg_value == CPENS (0, C7, C6, 3)
5328 || reg_value == CPENS (0, C7, C6, 4)
5329 || reg_value == CPENS (0, C7, C10, 4)
5330 || reg_value == CPENS (0, C7, C14, 4)
5331 || reg_value == CPENS (3, C7, C10, 3)
5332 || reg_value == CPENS (3, C7, C12, 3)
5333 || reg_value == CPENS (3, C7, C13, 3)
5334 || reg_value == CPENS (3, C7, C14, 3)
5335 || reg_value == CPENS (3, C7, C4, 3)
5336 || reg_value == CPENS (0, C7, C6, 5)
5337 || reg_value == CPENS (0, C7, C6, 6)
5338 || reg_value == CPENS (0, C7, C10, 6)
5339 || reg_value == CPENS (0, C7, C14, 6)
5340 || reg_value == CPENS (3, C7, C10, 5)
5341 || reg_value == CPENS (3, C7, C12, 5)
5342 || reg_value == CPENS (3, C7, C13, 5)
5343 || reg_value == CPENS (3, C7, C14, 5)
5344 || reg_value == CPENS (3, C7, C4, 4))
5345 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5346 return true;
5347
5348 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5349 if ((reg_value == CPENS (0, C7, C9, 0)
5350 || reg_value == CPENS (0, C7, C9, 1))
5351 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5352 return true;
5353
5354 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
5355 if (reg_value == CPENS (3, C7, C3, 0)
5356 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5357 return true;
5358
5359 return false;
5360 }
5361
5362 #undef C0
5363 #undef C1
5364 #undef C2
5365 #undef C3
5366 #undef C4
5367 #undef C5
5368 #undef C6
5369 #undef C7
5370 #undef C8
5371 #undef C9
5372 #undef C10
5373 #undef C11
5374 #undef C12
5375 #undef C13
5376 #undef C14
5377 #undef C15
5378
5379 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5380 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
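
/* A minimal sketch of how the two helpers above are used (assuming the
   standard A64 load/store-pair field layout, with Rt in bits [4:0], Rn in
   bits [9:5] and Rt2 in bits [14:10]), as in verify_ldpsw below:

     int rt  = BITS (insn, 4, 0);    // first transfer register
     int rn  = BITS (insn, 9, 5);    // base register
     int rt2 = BITS (insn, 14, 10);  // second transfer register
     int wb  = BIT (insn, 23);       // writeback (pre/post-index) flag
*/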
5381
5382 static enum err_type
5383 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5384 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5385 bool encoding ATTRIBUTE_UNUSED,
5386 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5387 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5388 {
5389 int t = BITS (insn, 4, 0);
5390 int n = BITS (insn, 9, 5);
5391 int t2 = BITS (insn, 14, 10);
5392
5393 if (BIT (insn, 23))
5394 {
5395 /* Write back enabled. */
5396 if ((t == n || t2 == n) && n != 31)
5397 return ERR_UND;
5398 }
5399
5400 if (BIT (insn, 22))
5401 {
5402 /* Load */
5403 if (t == t2)
5404 return ERR_UND;
5405 }
5406
5407 return ERR_OK;
5408 }
5409
5410 /* Verifier for vector-by-element instructions with three operands, where the
5411    condition `if sz:L == 11 then UNDEFINED` holds. */
5412
5413 static enum err_type
5414 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5415 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5416 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5417 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5418 {
5419 const aarch64_insn undef_pattern = 0x3;
5420 aarch64_insn value;
5421
5422 assert (inst->opcode);
5423 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5424 value = encoding ? inst->value : insn;
5425 assert (value);
5426
5427 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5428 return ERR_UND;
5429
5430 return ERR_OK;
5431 }
5432
5433 /* Check an instruction that takes three register operands and that
5434 requires the register numbers to be distinct from one another. */
5435
5436 static enum err_type
5437 verify_three_different_regs (const struct aarch64_inst *inst,
5438 const aarch64_insn insn ATTRIBUTE_UNUSED,
5439 bfd_vma pc ATTRIBUTE_UNUSED,
5440 bool encoding ATTRIBUTE_UNUSED,
5441 aarch64_operand_error *mismatch_detail
5442 ATTRIBUTE_UNUSED,
5443 aarch64_instr_sequence *insn_sequence
5444 ATTRIBUTE_UNUSED)
5445 {
5446 int rd, rs, rn;
5447
5448 rd = inst->operands[0].reg.regno;
5449 rs = inst->operands[1].reg.regno;
5450 rn = inst->operands[2].reg.regno;
5451 if (rd == rs || rd == rn || rs == rn)
5452 {
5453 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5454 mismatch_detail->error
5455 = _("the three register operands must be distinct from one another");
5456 mismatch_detail->index = -1;
5457 return ERR_UND;
5458 }
5459
5460 return ERR_OK;
5461 }
5462
5463 /* Add INST to the end of INSN_SEQUENCE. */
5464
5465 static void
5466 add_insn_to_sequence (const struct aarch64_inst *inst,
5467 aarch64_instr_sequence *insn_sequence)
5468 {
5469 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5470 }
5471
5472 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction
5473    INST.  If INST is NULL the given insn_sequence is cleared and no new
5474    sequence is started. */
5475
5476 void
5477 init_insn_sequence (const struct aarch64_inst *inst,
5478 aarch64_instr_sequence *insn_sequence)
5479 {
5480 int num_req_entries = 0;
5481
5482 if (insn_sequence->instr)
5483 {
5484 XDELETE (insn_sequence->instr);
5485 insn_sequence->instr = NULL;
5486 }
5487
5488 /* Handle all the cases here. May need to think of something smarter than
5489 a giant if/else chain if this grows. At that time, a lookup table may be
5490 best. */
5491 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5492 num_req_entries = 1;
5493 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5494 num_req_entries = 2;
5495
5496 insn_sequence->num_added_insns = 0;
5497 insn_sequence->num_allocated_insns = num_req_entries;
5498
5499 if (num_req_entries != 0)
5500 {
5501 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5502 add_insn_to_sequence (inst, insn_sequence);
5503 }
5504 }
5505
5506 /* Subroutine of verify_constraints. Check whether the instruction
5507 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5508 expectations are met. Return true if the check passes, otherwise
5509 describe the problem in MISMATCH_DETAIL.
5510
5511 IS_NEW_SECTION is true if INST is assumed to start a new section.
5512 The other arguments are as for verify_constraints. */
5513
5514 static bool
5515 verify_mops_pme_sequence (const struct aarch64_inst *inst,
5516 bool is_new_section,
5517 aarch64_operand_error *mismatch_detail,
5518 aarch64_instr_sequence *insn_sequence)
5519 {
5520 const struct aarch64_opcode *opcode;
5521 const struct aarch64_inst *prev_insn;
5522 int i;
5523
5524 opcode = inst->opcode;
5525 if (insn_sequence->instr)
5526 prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
5527 else
5528 prev_insn = NULL;
5529
5530 if (prev_insn
5531 && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
5532 && prev_insn->opcode != opcode - 1)
5533 {
5534 mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
5535 mismatch_detail->error = NULL;
5536 mismatch_detail->index = -1;
5537 mismatch_detail->data[0].s = prev_insn->opcode[1].name;
5538 mismatch_detail->data[1].s = prev_insn->opcode->name;
5539 mismatch_detail->non_fatal = true;
5540 return false;
5541 }
5542
5543 if (opcode->constraints & C_SCAN_MOPS_PME)
5544 {
5545 if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
5546 {
5547 mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
5548 mismatch_detail->error = NULL;
5549 mismatch_detail->index = -1;
5550 mismatch_detail->data[0].s = opcode->name;
5551 mismatch_detail->data[1].s = opcode[-1].name;
5552 mismatch_detail->non_fatal = true;
5553 return false;
5554 }
5555
5556 for (i = 0; i < 3; ++i)
5557 /* There's no specific requirement for the data register to be
5558 the same between consecutive SET* instructions. */
5559 if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
5560 || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
5561 || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
5562 && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
5563 {
5564 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5565 if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
5566 mismatch_detail->error = _("destination register differs from "
5567 "preceding instruction");
5568 else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
5569 mismatch_detail->error = _("source register differs from "
5570 "preceding instruction");
5571 else
5572 mismatch_detail->error = _("size register differs from "
5573 "preceding instruction");
5574 mismatch_detail->index = i;
5575 mismatch_detail->non_fatal = true;
5576 return false;
5577 }
5578 }
5579
5580 return true;
5581 }
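
/* An illustrative, hand-written example of the sequencing rule enforced
   above, using the FEAT_MOPS memory-copy instructions:

       cpyp  [x0]!, [x1]!, x2!     // prologue (C_SCAN_MOPS_P)
       cpym  [x0]!, [x1]!, x2!     // main     -- must directly follow cpyp
       cpye  [x0]!, [x1]!, x2!     // epilogue -- must directly follow cpym

   The three instructions must be consecutive and must keep the same
   destination, source and size registers (x0/x1/x2 here); any mismatch is
   reported through MISMATCH_DETAIL as a non-fatal diagnostic.  */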
5582
5583 /* This function verifies that the instruction INST adheres to its specified
5584 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5585 returned and MISMATCH_DETAIL contains the reason why verification failed.
5586
5587    The function is called both during assembly and disassembly.  If assembling
5588    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
5589    and will contain the PC of the current instruction w.r.t. the section.
5590 
5591    If not ENCODING and PC is 0 then we are at the start of a section.  The
5592    constraints are verified against the given state INSN_SEQUENCE, which is
5593    updated as it transitions through the verification. */
5594
5595 enum err_type
5596 verify_constraints (const struct aarch64_inst *inst,
5597 const aarch64_insn insn ATTRIBUTE_UNUSED,
5598 bfd_vma pc,
5599 bool encoding,
5600 aarch64_operand_error *mismatch_detail,
5601 aarch64_instr_sequence *insn_sequence)
5602 {
5603 assert (inst);
5604 assert (inst->opcode);
5605
5606 const struct aarch64_opcode *opcode = inst->opcode;
5607 if (!opcode->constraints && !insn_sequence->instr)
5608 return ERR_OK;
5609
5610 assert (insn_sequence);
5611
5612 enum err_type res = ERR_OK;
5613
5614 /* This instruction puts a constraint on the insn_sequence. */
5615 if (opcode->flags & F_SCAN)
5616 {
5617 if (insn_sequence->instr)
5618 {
5619 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5620 mismatch_detail->error = _("instruction opens new dependency "
5621 "sequence without ending previous one");
5622 mismatch_detail->index = -1;
5623 mismatch_detail->non_fatal = true;
5624 res = ERR_VFI;
5625 }
5626
5627 init_insn_sequence (inst, insn_sequence);
5628 return res;
5629 }
5630
5631 bool is_new_section = (!encoding && pc == 0);
5632 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5633 insn_sequence))
5634 {
5635 res = ERR_VFI;
5636 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5637 init_insn_sequence (NULL, insn_sequence);
5638 }
5639
5640 /* Verify constraints on an existing sequence. */
5641 if (insn_sequence->instr)
5642 {
5643 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5644 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5645 closed a previous one that we should have. */
5646 if (is_new_section && res == ERR_OK)
5647 {
5648 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5649 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5650 mismatch_detail->index = -1;
5651 mismatch_detail->non_fatal = true;
5652 res = ERR_VFI;
5653 /* Reset the sequence. */
5654 init_insn_sequence (NULL, insn_sequence);
5655 return res;
5656 }
5657
5658 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5659 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5660 {
5661 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5662 instruction for better error messages. */
5663 if (!opcode->avariant
5664 || !(*opcode->avariant &
5665 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5666 {
5667 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5668 mismatch_detail->error = _("SVE instruction expected after "
5669 "`movprfx'");
5670 mismatch_detail->index = -1;
5671 mismatch_detail->non_fatal = true;
5672 res = ERR_VFI;
5673 goto done;
5674 }
5675
5676 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5677 instruction that is allowed to be used with a MOVPRFX. */
5678 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5679 {
5680 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5681 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5682 "expected");
5683 mismatch_detail->index = -1;
5684 mismatch_detail->non_fatal = true;
5685 res = ERR_VFI;
5686 goto done;
5687 }
5688
5689 /* Next check for usage of the predicate register. */
5690 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5691 aarch64_opnd_info blk_pred, inst_pred;
5692 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5693 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5694 bool predicated = false;
5695 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5696
5697 /* Determine if the movprfx instruction used is predicated or not. */
5698 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5699 {
5700 predicated = true;
5701 blk_pred = insn_sequence->instr->operands[1];
5702 }
5703
5704 unsigned char max_elem_size = 0;
5705 unsigned char current_elem_size;
5706 int num_op_used = 0, last_op_usage = 0;
5707 int i, inst_pred_idx = -1;
5708 int num_ops = aarch64_num_of_operands (opcode);
5709 for (i = 0; i < num_ops; i++)
5710 {
5711 aarch64_opnd_info inst_op = inst->operands[i];
5712 switch (inst_op.type)
5713 {
5714 case AARCH64_OPND_SVE_Zd:
5715 case AARCH64_OPND_SVE_Zm_5:
5716 case AARCH64_OPND_SVE_Zm_16:
5717 case AARCH64_OPND_SVE_Zn:
5718 case AARCH64_OPND_SVE_Zt:
5719 case AARCH64_OPND_SVE_Vm:
5720 case AARCH64_OPND_SVE_Vn:
5721 case AARCH64_OPND_Va:
5722 case AARCH64_OPND_Vn:
5723 case AARCH64_OPND_Vm:
5724 case AARCH64_OPND_Sn:
5725 case AARCH64_OPND_Sm:
5726 if (inst_op.reg.regno == blk_dest.reg.regno)
5727 {
5728 num_op_used++;
5729 last_op_usage = i;
5730 }
5731 current_elem_size
5732 = aarch64_get_qualifier_esize (inst_op.qualifier);
5733 if (current_elem_size > max_elem_size)
5734 max_elem_size = current_elem_size;
5735 break;
5736 case AARCH64_OPND_SVE_Pd:
5737 case AARCH64_OPND_SVE_Pg3:
5738 case AARCH64_OPND_SVE_Pg4_5:
5739 case AARCH64_OPND_SVE_Pg4_10:
5740 case AARCH64_OPND_SVE_Pg4_16:
5741 case AARCH64_OPND_SVE_Pm:
5742 case AARCH64_OPND_SVE_Pn:
5743 case AARCH64_OPND_SVE_Pt:
5744 case AARCH64_OPND_SME_Pm:
5745 inst_pred = inst_op;
5746 inst_pred_idx = i;
5747 break;
5748 default:
5749 break;
5750 }
5751 }
5752
5753 assert (max_elem_size != 0);
5754 aarch64_opnd_info inst_dest = inst->operands[0];
5755 /* Determine the size that should be used to compare against the
5756 movprfx size. */
5757 current_elem_size
5758 = opcode->constraints & C_MAX_ELEM
5759 ? max_elem_size
5760 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5761
5762 /* If movprfx is predicated do some extra checks. */
5763 if (predicated)
5764 {
5765 /* The instruction must be predicated. */
5766 if (inst_pred_idx < 0)
5767 {
5768 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5769 mismatch_detail->error = _("predicated instruction expected "
5770 "after `movprfx'");
5771 mismatch_detail->index = -1;
5772 mismatch_detail->non_fatal = true;
5773 res = ERR_VFI;
5774 goto done;
5775 }
5776
5777 /* The instruction must have a merging predicate. */
5778 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5779 {
5780 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5781 mismatch_detail->error = _("merging predicate expected due "
5782 "to preceding `movprfx'");
5783 mismatch_detail->index = inst_pred_idx;
5784 mismatch_detail->non_fatal = true;
5785 res = ERR_VFI;
5786 goto done;
5787 }
5788
5789 	      /* The same predicate register must be used in the instruction. */
5790 if (blk_pred.reg.regno != inst_pred.reg.regno)
5791 {
5792 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5793 mismatch_detail->error = _("predicate register differs "
5794 "from that in preceding "
5795 "`movprfx'");
5796 mismatch_detail->index = inst_pred_idx;
5797 mismatch_detail->non_fatal = true;
5798 res = ERR_VFI;
5799 goto done;
5800 }
5801 }
5802
5803 /* Destructive operations by definition must allow one usage of the
5804 same register. */
5805 int allowed_usage
5806 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5807
5808 /* Operand is not used at all. */
5809 if (num_op_used == 0)
5810 {
5811 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5812 mismatch_detail->error = _("output register of preceding "
5813 "`movprfx' not used in current "
5814 "instruction");
5815 mismatch_detail->index = 0;
5816 mismatch_detail->non_fatal = true;
5817 res = ERR_VFI;
5818 goto done;
5819 }
5820
5821 /* We now know it's used, now determine exactly where it's used. */
5822 if (blk_dest.reg.regno != inst_dest.reg.regno)
5823 {
5824 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5825 mismatch_detail->error = _("output register of preceding "
5826 "`movprfx' expected as output");
5827 mismatch_detail->index = 0;
5828 mismatch_detail->non_fatal = true;
5829 res = ERR_VFI;
5830 goto done;
5831 }
5832
5833 /* Operand used more than allowed for the specific opcode type. */
5834 if (num_op_used > allowed_usage)
5835 {
5836 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5837 mismatch_detail->error = _("output register of preceding "
5838 "`movprfx' used as input");
5839 mismatch_detail->index = last_op_usage;
5840 mismatch_detail->non_fatal = true;
5841 res = ERR_VFI;
5842 goto done;
5843 }
5844
5845 	  /* Now the only thing left is the qualifier checks.  The register
5846 	     must have the same maximum element size. */
5847 if (inst_dest.qualifier
5848 && blk_dest.qualifier
5849 && current_elem_size
5850 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5851 {
5852 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5853 mismatch_detail->error = _("register size not compatible with "
5854 "previous `movprfx'");
5855 mismatch_detail->index = 0;
5856 mismatch_detail->non_fatal = true;
5857 res = ERR_VFI;
5858 goto done;
5859 }
5860 }
5861
5862 done:
5863 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
5864 /* We've checked the last instruction in the sequence and so
5865 don't need the sequence any more. */
5866 init_insn_sequence (NULL, insn_sequence);
5867 else
5868 add_insn_to_sequence (inst, insn_sequence);
5869 }
5870
5871 return res;
5872 }
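
/* An illustrative, hand-written example of the MOVPRFX constraints verified
   above:

       movprfx  z0, z1
       add      z0.s, z0.s, #1           // OK: destructive SVE op writing z0

       movprfx  z0.s, p0/m, z1.s
       fmla     z0.s, p0/m, z2.s, z3.s   // OK: same merging predicate and size

       movprfx  z0, z1
       add      z2.s, z2.s, #1           // flagged: z0 is not used as output

   Each violation is reported as a non-fatal diagnostic so that assembly or
   disassembly can continue.  */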
5873
5874
5875 /* Return true if VALUE cannot be moved into an SVE register using DUP
5876 (with any element size, not just ESIZE) and if using DUPM would
5877 therefore be OK. ESIZE is the number of bytes in the immediate. */
5878
5879 bool
5880 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5881 {
5882 int64_t svalue = uvalue;
5883 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5884
5885 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5886 return false;
5887 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5888 {
5889 svalue = (int32_t) uvalue;
5890 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5891 {
5892 svalue = (int16_t) uvalue;
5893 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5894 return false;
5895 }
5896 }
5897 if ((svalue & 0xff) == 0)
5898 svalue /= 256;
5899 return svalue < -128 || svalue >= 128;
5900 }
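
/* Illustrative values, assuming halfword elements (esize == 2):

     aarch64_sve_dupm_mov_immediate_p (0x1200, 2) == false
       -- 0x12 << 8 fits DUP's shifted 8-bit signed immediate, so prefer DUP.
     aarch64_sve_dupm_mov_immediate_p (0x1234, 2) == true
       -- no DUP encoding exists for this value, so DUPM (when the value is a
          valid bitmask immediate) is the way to materialise it.  */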
5901
5902 /* Include the opcode description table as well as the operand description
5903 table. */
5904 #define VERIFIER(x) verify_##x
5905 #include "aarch64-tbl.h"
5906