/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
#include "opintl.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first; e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases, so the fields H:L:M should be passed
   in the order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
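
/* For example, a 3-bit lane index encoded in H:L:M (H most significant)
   is inserted with
     insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
   so that bit 0 of INDEX lands in M, bit 1 in L and bit 2 in H.  */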

/* Insert a raw field value VALUE into all fields in SELF->fields after START.
   The least significant bit goes in the final field.  */

static void
insert_all_fields_after (const aarch64_operand *self, unsigned int start,
                         aarch64_insn *code, aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > start; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  insert_all_fields_after (self, 0, code, value);
}

/* Operand inserters.  */

/* Insert nothing.  */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
                  aarch64_insn *code ATTRIBUTE_UNUSED,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}

/* Insert register number.  */
bool
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int val = info->reg.regno - get_operand_specific_data (self);
  insert_field (self->fields[0], code, val, 0);
  return true;
}

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);	/* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4_11, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>  <V>
             0000       RESERVED
             xxx1       B
             xx10       H
             x100       S
             1000       D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_4B:
        case AARCH64_OPND_QLF_S_2H:
          /* L:H */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        default:
          return false;
        }
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
        /* Complex operand takes two elements.  */
        reglane_index *= 2;

      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          assert (reglane_index < 8);
          insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          assert (reglane_index < 2);
          insert_field (FLD_H, code, reglane_index, 0);
          break;
        default:
          return false;
        }
    }
  return true;
}
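
/* As a worked example for the final case above: with qualifier S_H and
   lane index 5 (binary 101), bit 0 goes in M, bit 1 in L and bit 2 in H,
   so the insertion yields M = 1, L = 0 and H = 1.  */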

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bool
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return true;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: return false;
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);

  return true;
}
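
/* For instance, LD1 {<Vt>.<T>} (one element per structure, one register)
   is encoded with opcode 0x7, while LD3 {<Vt>.<T>-<Vt3>.<T>} uses 0x4.  */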

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
bool
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has "two consecutive"
       instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return true;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
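
/* As a worked example: LD1 {<Vt>.S}[3] has word-sized elements, so the
   index 3 is placed in "Q:S" as QSsize = 3 << 2 = 0b1100, giving Q = 1,
   S = 1 and size = 00, with opcode<2:1> = 0x2.  */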

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh   Q   <T>
         0000   x   SEE AdvSIMD modified immediate
         0001   0   8B
         0001   1   16B
         001x   0   4H
         001x   1   8H
         01xx   0   2S
         01xx   1   4S
         1xxx   0   RESERVED
         1xxx   1   2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh   <shift>
       0000   SEE AdvSIMD modified immediate
       0001   (16-UInt(immh:immb))
       001x   (32-UInt(immh:immb))
       01xx   (64-UInt(immh:immb))
       1xxx   (128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh   <shift>
       0000   SEE AdvSIMD modified immediate
       0001   (UInt(immh:immb)-8)
       001x   (UInt(immh:immb)-16)
       01xx   (UInt(immh:immb)-32)
       1xxx   (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
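
/* As a worked example: SSHR <Vd>.2D, <Vn>.2D, #64 is a right shift on
   64-bit elements, so immh:immb = 128 - 64 = 0b1000000, i.e. immh = 1000
   and immb = 000, with Q = 1 for the 2D arrangement.  */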

/* Insert fields for e.g. the immediate operands in
     BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bool
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (operand_need_shift_by_three (self))
    imm >>= 3;
  if (operand_need_shift_by_four (self))
    imm >>= 4;
  insert_all_fields (self, code, imm);
  return true;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bool
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst,
                      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return true;
}
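
/* For example, MOVZ <Xd>, #0x1234, LSL #32 encodes imm16 = 0x1234 and
   hw = 32 >> 4 = 2; the shift amount is always a multiple of 16.  */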

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                  aarch64_operand_error *errors
                                  ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return true;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}
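
/* As a worked example: MOVI <Vd>.4S, #<imm8>, LSL #16 has a 4-byte element
   size, so the shift amount 16 is encoded as 16 >> 3 = 2 in the two-bit
   sub-field cmode<2:1>.  */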

/* Insert fields for an 8-bit floating-point immediate.  */
bool
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return true;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
bool
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
bool
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bool
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return true;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bool
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return true;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bool invert_p,
                    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                   self->fields[0]);
  return res;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bool
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC, errors);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst,
                aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: return false;
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return true;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}

/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven lookup.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
         S   <amount>
         0   [absent]
         1   #0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}
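
/* As a worked example: STR <Qt>, [<Xn|SP>, <Xm>, LSL #4] treats LSL as
   UXTX, so option = 0b011, and the non-zero shift amount gives S = 1.  */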

/* Encode the address operand for e.g.
     STLUR <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g.
     STLUR <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info, aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}

/* Encode the address operand, potentially offset by the load/store amount,
   e.g. LDIAPP <Xt>, <Xt2>, [<Xn|SP>, #<simm>]
   and  STILP  <Xt>, <Xt2>, [<Xn|SP>], #<simm>.  */
bool
aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                   const aarch64_opnd_info *info,
                                   aarch64_insn *code,
                                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm */
  imm = info->addr.offset.imm;
  if (!imm)
    insert_field (FLD_opc2, code, 1, 0);

  return true;
}

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bool
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return true;
}
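
/* For example, LDR <Xt>, [<Xn|SP>, #32] has an 8-byte element size, so
   shift = 3 and the offset is encoded as uimm12 = 32 >> 3 = 4.  */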

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return true;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst,
                    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
{
  /* If this is a system instruction, check whether we have any restrictions
     on which registers it can use.  */
  if (inst->opcode->iclass == ic_system)
    {
      uint64_t opcode_flags
        = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
      uint32_t sysreg_flags
        = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);

      /* Check to see if it's read-only; else check if it's write-only.
         If it's both or unspecified, we don't care.  */
      if (opcode_flags == F_SYS_READ
          && sysreg_flags
          && sysreg_flags != F_REG_READ)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be read from");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
      else if (opcode_flags == F_SYS_WRITE
               && sysreg_flags
               && sysreg_flags != F_REG_WRITE)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be written to");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
    }
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return true;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);

  /* Extra CRm mask.  */
  if (info->sysreg.flags & F_REG_IN_CRM)
    insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
  return true;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

bool
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return true;
}

/* Encode the memory barrier option operand for DSB <option>nXS|#<imm>.  */

bool
aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant the operand is a 5-bit unsigned
     immediate, encoded in CRm<3:2>.  */
  aarch64_insn value = (info->barrier->value >> 2) - 4;
  insert_field (FLD_CRm_dsb_nxs, code, value, 0);
  return true;
}
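
/* For example, assuming the SY nXS barrier option carries the value 28,
   the calculation above yields (28 >> 2) - 4 = 3 for CRm<3:2>.  */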

/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

bool
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return true;
}

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */

bool
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return true;
}

/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3_10, code, info->shifter.amount, 0);

  return true;
}

/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bool
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6_10, code, info->shifter.amount, 0);

  return true;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bool
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bool
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bool
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3_10, FLD_SVE_imm6);
  return true;
}

/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return true;
}

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bool
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE ADD/SUB immediate.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
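
/* For example, ADD <Zdn>.<T>, <Zdn>.<T>, #1, LSL #8 has shifter.amount == 8,
   so the value inserted is (1 & 0xff) | 256: the low eight bits hold the
   immediate and bit 8 selects the shifted form.  */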

bool
aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
                                 const aarch64_opnd_info *info,
                                 aarch64_insn *code,
                                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  unsigned int val = info->reglist.first_regno;
  insert_field (self->fields[0], code, val / num_regs, 0);
  return true;
}

/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst,
                       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
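
/* As a worked example of the triangular encoding: indexing element 2 of
   Zn.S has esize = 4, so the value is (2 * 2 + 1) * 4 = 20 = 0b0010100,
   i.e. imm5 = 0b10100 and SVE_tszh = 0b00.  */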
1262
1263 /* Encode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
1264 than the number of elements in 128 bit, which can encode il:tsz. */
1265 bool
aarch64_ins_sve_index_imm(const aarch64_operand * self,const aarch64_opnd_info * info,aarch64_insn * code,const aarch64_inst * inst ATTRIBUTE_UNUSED,aarch64_operand_error * errors ATTRIBUTE_UNUSED)1266 aarch64_ins_sve_index_imm (const aarch64_operand *self,
1267 const aarch64_opnd_info *info, aarch64_insn *code,
1268 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1269 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1270 {
1271 insert_field (self->fields[0], code, info->reglane.regno, 0);
1272 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1273 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1274 2, self->fields[1],self->fields[2]);
1275 return true;
1276 }
1277
1278 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1279 bool
aarch64_ins_sve_limm_mov(const aarch64_operand * self,const aarch64_opnd_info * info,aarch64_insn * code,const aarch64_inst * inst,aarch64_operand_error * errors)1280 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1281 const aarch64_opnd_info *info, aarch64_insn *code,
1282 const aarch64_inst *inst,
1283 aarch64_operand_error *errors)
1284 {
1285 return aarch64_ins_limm (self, info, code, inst, errors);
1286 }
1287
1288 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1289 and where MM occupies the most-significant part. The operand-dependent
1290 value specifies the number of bits in Zn. */
1291 bool
aarch64_ins_sve_quad_index(const aarch64_operand * self,const aarch64_opnd_info * info,aarch64_insn * code,const aarch64_inst * inst ATTRIBUTE_UNUSED,aarch64_operand_error * errors ATTRIBUTE_UNUSED)1292 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1293 const aarch64_opnd_info *info, aarch64_insn *code,
1294 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1295 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1296 {
1297 unsigned int reg_bits = get_operand_specific_data (self);
1298 assert (info->reglane.regno < (1U << reg_bits));
1299 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1300 insert_all_fields (self, code, val);
1301 return true;
1302 }

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}

/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
                                 const aarch64_opnd_info *info,
                                 aarch64_insn *code,
                                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                 aarch64_operand_error *errors
                                   ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  unsigned int mask ATTRIBUTE_UNUSED = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;
  assert ((val & mask) == val);
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
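
/* A worked example (values chosen for illustration): for a four-register
   list, num_regs == 4, so the stride is 16 / 4 == 4 and mask is
   16 | 3 == 0b10011.  A list starting at Z17 (0b10001) satisfies the
   assert and encodes as 1 in the top-bit field and 1 in the low field.  */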

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}

/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
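
/* Illustration of the arithmetic above: for a .H operand (esize == 2)
   and a shift of 3, the encoded value is 8 * 2 + 3 == 19; the element
   size and the shift amount therefore share one immediate encoding.  */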

/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int) opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
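
/* By the same arithmetic, a right shift of 5 on a .S operand
   (esize == 4) encodes as 16 * 4 - 5 == 59 (illustrative values).  */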

/* Encode a single-bit immediate that selects between #0.5 and #1.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_half_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
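  /* 0x3f000000 is the single-precision (IEEE 754) bit pattern of 0.5;
     the operand constraints have already been checked, so only the two
     permitted values can reach this point.  */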
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

/* Encode a single-bit immediate that selects between #0.5 and #2.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_half_two (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

bool
aarch64_ins_sme_za_vrs1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}

bool
aarch64_ins_sme_za_vrs2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
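
/* In both functions above, imm / (countm1 + 1) turns a vector offset
   into a group index: e.g. for a four-vector group (countm1 == 3) at
   offset 8, the encoded value is 8 / 4 == 2 (illustrative numbers).  */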

/* Encode, in an SME instruction such as MOVA, the ZA tile vector register
   number, vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
                             const aarch64_opnd_info *info,
                             aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
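
/* The packing above merges the tile number and the vector offset into
   one field: e.g. for a .S tile (two immediate bits), ZA2 with offset 1
   packs as (2 << 2) | 1 == 9 (an illustrative value, not taken from a
   real opcode entry).  */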

bool
aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
                                   const aarch64_opnd_info *info,
                                   aarch64_insn *code,
                                   const aarch64_inst *inst,
                                   aarch64_operand_error *errors
                                     ATTRIBUTE_UNUSED)
{
  int ebytes = aarch64_get_qualifier_esize (info->qualifier);
  int range_size = get_opcode_dependent_value (inst->opcode);
  int fld_v = info->indexed_za.v;
  int fld_rv = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int max_value = 16 / range_size / ebytes;

  if (max_value == 0)
    max_value = 1;

  assert (imm % range_size == 0 && (imm / range_size) < max_value);
  int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
  assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));

  insert_field (self->fields[0], code, fld_v, 0);
  insert_field (self->fields[1], code, fld_rv, 0);
  insert_field (self->fields[2], code, fld_zan_imm, 0);

  return true;
}

/* Encode, in the SME instruction ZERO, a list of up to eight 64-bit
   element tile names separated by commas, encoded in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles, which are converted into the
   corresponding set of 64-bit element tiles.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_mask = info->imm.value;
  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}
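
/* The mask is one bit per 64-bit tile; for example, ZERO {ZA0.D, ZA3.D}
   would arrive here as 0b00001001 (assuming the usual bit-per-tile
   layout of imm8; the parser has already built the mask).  */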

bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
                          const aarch64_opnd_info *info,
                          aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int countm1 = info->indexed_za.index.countm1;
  assert (imm % (countm1 + 1) == 0);
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
  return true;
}

bool
aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->addr.base_regno;
  int imm = info->addr.offset.imm;
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}

/* Encode the {SM | ZA} mode in SMSTART and SMSTOP.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;
  /* Set CRm[3:1] bits.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02; /* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04; /* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}

/* Encode the source scalable predicate register (Pn), the name of the
   index base register W12-W15 (Rm), and the optional element index,
   defaulting to 0, in the range 0 to one less than the number of vector
   elements in a 128-bit vector register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
                                     const aarch64_opnd_info *info,
                                     aarch64_insn *code,
                                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->indexed_za.regno;
  int fld_rm = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int fld_i1, fld_tszh, fld_tszl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less
     than the number of vector elements in a 128-bit vector register,
     encoded in "i1:tszh:tszl".

     i1  tszh  tszl  <T>
     0   0     000   RESERVED
     x   x     xx1   B
     x   x     x10   H
     x   x     100   S
     x   1     000   D  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is a 4-bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tszl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is a 3-bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tszl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is a 2-bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tszl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is a 1-bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x1;
      fld_tszl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tszl, 0);
  return true;
}
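
/* A worked instance of the table above (illustrative): for a .H element
   with index 5, fld_i1 == 1, fld_tszh == 0 and
   fld_tszl == ((5 << 2) | 0x2) & 0x7 == 0b110, matching the x10 pattern
   that identifies H.  */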

/* Insert X0-X30.  Register 31 is unallocated.  */
bool
aarch64_ins_x0_to_x30 (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  assert (info->reg.regno <= 30);
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}

/* Insert an indexed register, with the first field being the register
   number and the remaining fields being the index.  */
bool
aarch64_ins_simple_index (const aarch64_operand *self,
                          const aarch64_opnd_info *info,
                          aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int bias = get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
  insert_all_fields_after (self, 1, code, info->reglane.index);
  return true;
}

/* Insert a plain shift-right immediate, when there is only a single
   element size.  */
bool
aarch64_ins_plain_shrimm (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int base = 1 << get_operand_field_width (self, 0);
  insert_field (self->fields[0], code, base - info->imm.value, 0);
  return true;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
                     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default:
      break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}

/* Opcodes in which several operands share the same fields are usually
   marked with flags.  In this function, we detect those flags and use
   the information in one of the related operands to do the encoding.
   That operand is not arbitrary: it must be one that carries enough
   information to determine the encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_RCPC3_SIZE)
    {
      switch (inst->operands[0].qualifier)
        {
        case AARCH64_OPND_QLF_W: value = 2; break;
        case AARCH64_OPND_QLF_X: value = 3; break;
        case AARCH64_OPND_QLF_S_B: value = 0; break;
        case AARCH64_OPND_QLF_S_H: value = 1; break;
        case AARCH64_OPND_QLF_S_S: value = 2; break;
        case AARCH64_OPND_QLF_S_D: value = 3; break;
        case AARCH64_OPND_QLF_S_Q: value = 0; break;
        default: return;
        }
      insert_field (FLD_rcpc3_size, &inst->value, value, 0);
    }

  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: return;
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num; /* Number of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>  q  <t>
         0000       x  reserved
         xxx1       0  8b
         xxx1       1  16b
         xx10       0  4h
         xx10       1  8h
         x100       0  2s
         x100       1  4s
         1000       0  reserved
         1000       1  2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }

  if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
    {
      enum aarch64_opnd_qualifier qualifier[2];
      aarch64_insn value1 = 0;
      idx = 0;
      qualifier[0] = inst->operands[idx].qualifier;
      qualifier[1] = inst->operands[idx + 2].qualifier;
      value = aarch64_get_qualifier_standard_value (qualifier[0]);
      value1 = aarch64_get_qualifier_standard_value (qualifier[1]);
      assert ((value >> 1) == value1);
      insert_field (FLD_size, &inst->value, value1, inst->opcode->mask);
    }

  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* E.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}

/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
    case sme_psel:
      /* The variant is encoded as part of the immediate.  */
      break;

    case sme_size_12_bhs:
      insert_field (FLD_SME_size_12, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22:
      insert_field (FLD_SME_size_22, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22_hsd:
      insert_field (FLD_SME_size_22, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_size_12_hs:
      insert_field (FLD_SME_size_12, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_sz_23:
      insert_field (FLD_SME_sz_23, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sme_shift:
    case sve_index:
    case sve_index1:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
         part of the immediate.  */
      break;

    case sve_limm:
    case sme2_mov:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
         and depend on the immediate.  They don't have a separate
         encoding.  */
      break;

    case sme_misc:
    case sme2_movaz:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* The variant is taken modulo 3 for `OP_SVE_Vv_HSD'.  */
      insert_field (FLD_size, &inst->value,
                    aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      insert_fields (&inst->value,
                     (1 << aarch64_get_variant (inst)),
                     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
        variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}
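
/* For example (illustrative): LSR X0, X1, #5 becomes UBFM X0, X1, #5, #63,
   while the 32-bit form LSR W0, W1, #5 becomes UBFM W0, W1, #5, #31,
   selected by the imm_0_31 qualifier test above.  */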

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */

static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}
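
/* Worked example (illustrative): SBFIZ X0, X1, #4, #8 is rewritten as
   SBFM X0, X1, #60, #7, since (64 - 4) & 0x3f == 60 and 8 - 1 == 7.  */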

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */

static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}
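
/* For instance (illustrative): LSL X0, X1, #3 is rewritten as
   UBFM X0, X1, #61, #60, since (64 - 3) & 0x3f == 61 and 63 - 3 == 60.  */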

/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
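
/* E.g. (illustrative) CSET W0, EQ is rewritten as CSINC W0, WZR, WZR, NE:
   register number 0x1f selects WZR and the condition is inverted.  */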

/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t) 0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
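
/* Worked example (illustrative): MOV X0, #0x12340000 is rewritten as
   MOVZ X0, #0x1234, LSL #16; aarch64_wide_constant_p reports a shift of
   16, after which the value is reduced to the 16-bit chunk 0x1234.  */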

/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI using the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return
   the matched operand qualifier sequence in *QLF_SEQ.  */

bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail,
                       aarch64_instr_sequence *insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return false;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      if (info->skip)
        {
          DEBUG_TRACE ("skip the incomplete operand %d", i);
          continue;
        }
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
          && !aarch64_insert_operand (opnd, info, &inst->value, inst,
                                      mismatch_detail))
        return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
                                               mismatch_detail, insn_sequence);
      switch (result)
        {
        case ERR_UND:
        case ERR_UNP:
        case ERR_NYI:
          return false;
        default:
          break;
        }
    }

  /* Always run the constraint verifiers, whether or not the instruction
     has the flag set, because constraints need to maintain a global
     state.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
                                             mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return true;
}
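
/* A minimal sketch of how an assembler front end might drive the encoder
   (illustrative only: the population of `inst' is assumed to have been
   done by operand parsing, and `emit_insn' and `report_operand_error'
   are hypothetical caller-provided routines):

     aarch64_inst inst;              // filled in by operand parsing
     aarch64_insn code;
     aarch64_operand_error detail;

     if (aarch64_opcode_encode (inst.opcode, &inst, &code, NULL,
                                &detail, insn_sequence))
       emit_insn (code);                  // hypothetical output routine
     else
       report_operand_error (&detail);    // hypothetical diagnostics  */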