xref: /netbsd-src/external/gpl3/gdb.old/dist/opcodes/aarch64-asm.c (revision f0fde9902fd4d72ded2807793acc7bfaa1ebf243)
1 /* aarch64-asm.c -- AArch64 assembler support.
2    Copyright (C) 2012-2019 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26 
27 /* Utilities.  */
28 
29 /* The unnamed arguments consist of the number of fields and information about
30    these fields where the VALUE will be inserted into CODE.  MASK can be zero or
31    the base mask of the opcode.
32 
33    N.B. the fields are required to be in such an order than the least signficant
34    field for VALUE comes the first, e.g. the <index> in
35     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36    is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37    the order of M, L, H.  */
38 
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42   uint32_t num;
43   const aarch64_field *field;
44   enum aarch64_field_kind kind;
45   va_list va;
46 
47   va_start (va, mask);
48   num = va_arg (va, uint32_t);
49   assert (num <= 5);
50   while (num--)
51     {
52       kind = va_arg (va, enum aarch64_field_kind);
53       field = &fields[kind];
54       insert_field (kind, code, value, mask);
55       value >>= field->width;
56     }
57   va_end (va);
58 }
59 
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61    The least significant bit goes in the final field.  */
62 
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 		   aarch64_insn value)
66 {
67   unsigned int i;
68   enum aarch64_field_kind kind;
69 
70   for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71     if (self->fields[i] != FLD_NIL)
72       {
73 	kind = self->fields[i];
74 	insert_field (kind, code, value, 0);
75 	value >>= fields[kind].width;
76       }
77 }
78 
79 /* Operand inserters.  */
80 
81 /* Insert register number.  */
82 bfd_boolean
83 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 		   aarch64_insn *code,
85 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
86 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
87 {
88   insert_field (self->fields[0], code, info->reg.regno, 0);
89   return TRUE;
90 }
91 
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].

   The index encoding depends on the instruction class: element-copy
   (asisdone/asimdins), dot-product, SM3 crypto and the generic
   by-element classes all place the index in different fields.  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is 0/1/2/3 for B/H/S/D element size: how far left the index
	 (and its trailing size marker) must be shifted.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* The (possibly doubled) index is spread across H:L:M, H:L or H
	 depending on the element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return TRUE;
}
181 
182 /* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
183 bfd_boolean
184 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
185 		     aarch64_insn *code,
186 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
187 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
188 {
189   /* R */
190   insert_field (self->fields[0], code, info->reglist.first_regno, 0);
191   /* len */
192   insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
193   return TRUE;
194 }
195 
196 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
197    in AdvSIMD load/store instructions.  */
198 bfd_boolean
199 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
200 			  const aarch64_opnd_info *info, aarch64_insn *code,
201 			  const aarch64_inst *inst,
202 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
203 {
204   aarch64_insn value = 0;
205   /* Number of elements in each structure to be loaded/stored.  */
206   unsigned num = get_opcode_dependent_value (inst->opcode);
207 
208   /* Rt */
209   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
210   /* opcode */
211   switch (num)
212     {
213     case 1:
214       switch (info->reglist.num_regs)
215 	{
216 	case 1: value = 0x7; break;
217 	case 2: value = 0xa; break;
218 	case 3: value = 0x6; break;
219 	case 4: value = 0x2; break;
220 	default: assert (0);
221 	}
222       break;
223     case 2:
224       value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
225       break;
226     case 3:
227       value = 0x4;
228       break;
229     case 4:
230       value = 0x0;
231       break;
232     default:
233       assert (0);
234     }
235   insert_field (FLD_opcode, code, value, 0);
236 
237   return TRUE;
238 }
239 
240 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
241    single structure to all lanes instructions.  */
242 bfd_boolean
243 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
244 			    const aarch64_opnd_info *info, aarch64_insn *code,
245 			    const aarch64_inst *inst,
246 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
247 {
248   aarch64_insn value;
249   /* The opcode dependent area stores the number of elements in
250      each structure to be loaded/stored.  */
251   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
252 
253   /* Rt */
254   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
255   /* S */
256   value = (aarch64_insn) 0;
257   if (is_ld1r && info->reglist.num_regs == 2)
258     /* OP_LD1R does not have alternating variant, but have "two consecutive"
259        instead.  */
260     value = (aarch64_insn) 1;
261   insert_field (FLD_S, code, value, 0);
262 
263   return TRUE;
264 }
265 
266 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
267    operand e.g. Vt in AdvSIMD load/store single element instructions.  */
268 bfd_boolean
269 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
270 			   const aarch64_opnd_info *info, aarch64_insn *code,
271 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
272 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
273 {
274   aarch64_field field = {0, 0};
275   aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
276   aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */
277 
278   assert (info->reglist.has_index);
279 
280   /* Rt */
281   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
282   /* Encode the index, opcode<2:1> and size.  */
283   switch (info->qualifier)
284     {
285     case AARCH64_OPND_QLF_S_B:
286       /* Index encoded in "Q:S:size".  */
287       QSsize = info->reglist.index;
288       opcodeh2 = 0x0;
289       break;
290     case AARCH64_OPND_QLF_S_H:
291       /* Index encoded in "Q:S:size<1>".  */
292       QSsize = info->reglist.index << 1;
293       opcodeh2 = 0x1;
294       break;
295     case AARCH64_OPND_QLF_S_S:
296       /* Index encoded in "Q:S".  */
297       QSsize = info->reglist.index << 2;
298       opcodeh2 = 0x2;
299       break;
300     case AARCH64_OPND_QLF_S_D:
301       /* Index encoded in "Q".  */
302       QSsize = info->reglist.index << 3 | 0x1;
303       opcodeh2 = 0x2;
304       break;
305     default:
306       assert (0);
307     }
308   insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
309   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
310   insert_field_2 (&field, code, opcodeh2, 0);
311 
312   return TRUE;
313 }
314 
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.

   For the vector form (asimdshf) the low bit of the qualifier's standard
   value is the Q bit; the remaining bits select the row of the immh
   table below.  The shift amount is then folded into immh:immb, biased
   differently for right shifts (VLSR) and left shifts (VLSL).  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      /* The remaining qualifier bits index the element-size row.  */
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}
370 
371 /* Insert fields for e.g. the immediate operands in
372    BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
373 bfd_boolean
374 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
375 		 aarch64_insn *code,
376 		 const aarch64_inst *inst ATTRIBUTE_UNUSED,
377 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
378 {
379   int64_t imm;
380 
381   imm = info->imm.value;
382   if (operand_need_shift_by_two (self))
383     imm >>= 2;
384   if (operand_need_shift_by_four (self))
385     imm >>= 4;
386   insert_all_fields (self, code, imm);
387   return TRUE;
388 }
389 
390 /* Insert immediate and its shift amount for e.g. the last operand in
391      MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
392 bfd_boolean
393 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
394 		      aarch64_insn *code, const aarch64_inst *inst,
395 		      aarch64_operand_error *errors)
396 {
397   /* imm16 */
398   aarch64_ins_imm (self, info, code, inst, errors);
399   /* hw */
400   insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
401   return TRUE;
402 }
403 
404 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
405      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
406 bfd_boolean
407 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
408 				  const aarch64_opnd_info *info,
409 				  aarch64_insn *code,
410 				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
411 				  aarch64_operand_error *errors
412 					ATTRIBUTE_UNUSED)
413 {
414   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
415   uint64_t imm = info->imm.value;
416   enum aarch64_modifier_kind kind = info->shifter.kind;
417   int amount = info->shifter.amount;
418   aarch64_field field = {0, 0};
419 
420   /* a:b:c:d:e:f:g:h */
421   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
422     {
423       /* Either MOVI <Dd>, #<imm>
424 	 or     MOVI <Vd>.2D, #<imm>.
425 	 <imm> is a 64-bit immediate
426 	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
427 	 encoded in "a:b:c:d:e:f:g:h".	*/
428       imm = aarch64_shrink_expanded_imm8 (imm);
429       assert ((int)imm >= 0);
430     }
431   insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
432 
433   if (kind == AARCH64_MOD_NONE)
434     return TRUE;
435 
436   /* shift amount partially in cmode */
437   assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
438   if (kind == AARCH64_MOD_LSL)
439     {
440       /* AARCH64_MOD_LSL: shift zeros.  */
441       int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
442       assert (esize == 4 || esize == 2 || esize == 1);
443       /* For 8-bit move immediate, the optional LSL #0 does not require
444 	 encoding.  */
445       if (esize == 1)
446 	return TRUE;
447       amount >>= 3;
448       if (esize == 4)
449 	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
450       else
451 	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
452     }
453   else
454     {
455       /* AARCH64_MOD_MSL: shift ones.  */
456       amount >>= 4;
457       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
458     }
459   insert_field_2 (&field, code, amount, 0);
460 
461   return TRUE;
462 }
463 
464 /* Insert fields for an 8-bit floating-point immediate.  */
465 bfd_boolean
466 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
467 		   aarch64_insn *code,
468 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
469 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
470 {
471   insert_all_fields (self, code, info->imm.value);
472   return TRUE;
473 }
474 
475 /* Insert 1-bit rotation immediate (#90 or #270).  */
476 bfd_boolean
477 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
478 			 const aarch64_opnd_info *info,
479 			 aarch64_insn *code, const aarch64_inst *inst,
480 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
481 {
482   uint64_t rot = (info->imm.value - 90) / 180;
483   assert (rot < 2U);
484   insert_field (self->fields[0], code, rot, inst->opcode->mask);
485   return TRUE;
486 }
487 
488 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
489 bfd_boolean
490 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
491 			 const aarch64_opnd_info *info,
492 			 aarch64_insn *code, const aarch64_inst *inst,
493 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
494 {
495   uint64_t rot = info->imm.value / 90;
496   assert (rot < 4U);
497   insert_field (self->fields[0], code, rot, inst->opcode->mask);
498   return TRUE;
499 }
500 
501 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
502    e.g.  SCVTF <Dd>, <Wn>, #<fbits>.  */
503 bfd_boolean
504 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
505 		   aarch64_insn *code,
506 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
507 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
508 {
509   insert_field (self->fields[0], code, 64 - info->imm.value, 0);
510   return TRUE;
511 }
512 
513 /* Insert arithmetic immediate for e.g. the last operand in
514      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
515 bfd_boolean
516 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
517 		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
518 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
519 {
520   /* shift */
521   aarch64_insn value = info->shifter.amount ? 1 : 0;
522   insert_field (self->fields[0], code, value, 0);
523   /* imm12 (unsigned) */
524   insert_field (self->fields[1], code, info->imm.value, 0);
525   return TRUE;
526 }
527 
528 /* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
529    the operand should be inverted before encoding.  */
530 static bfd_boolean
531 aarch64_ins_limm_1 (const aarch64_operand *self,
532 		    const aarch64_opnd_info *info, aarch64_insn *code,
533 		    const aarch64_inst *inst, bfd_boolean invert_p,
534 		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
535 {
536   aarch64_insn value;
537   uint64_t imm = info->imm.value;
538   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
539 
540   if (invert_p)
541     imm = ~imm;
542   /* The constraint check should have guaranteed this wouldn't happen.  */
543   assert (aarch64_logical_immediate_p (imm, esize, &value));
544 
545   insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
546 		 self->fields[0]);
547   return TRUE;
548 }
549 
550 /* Insert logical/bitmask immediate for e.g. the last operand in
551      ORR <Wd|WSP>, <Wn>, #<imm>.  */
552 bfd_boolean
553 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
554 		  aarch64_insn *code, const aarch64_inst *inst,
555 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
556 {
557   return aarch64_ins_limm_1 (self, info, code, inst,
558 			     inst->opcode->op == OP_BIC, errors);
559 }
560 
561 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
562 bfd_boolean
563 aarch64_ins_inv_limm (const aarch64_operand *self,
564 		      const aarch64_opnd_info *info, aarch64_insn *code,
565 		      const aarch64_inst *inst,
566 		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
567 {
568   return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
569 }
570 
571 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
572    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
573 bfd_boolean
574 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
575 		aarch64_insn *code, const aarch64_inst *inst,
576 		aarch64_operand_error *errors)
577 {
578   aarch64_insn value = 0;
579 
580   assert (info->idx == 0);
581 
582   /* Rt */
583   aarch64_ins_regno (self, info, code, inst, errors);
584   if (inst->opcode->iclass == ldstpair_indexed
585       || inst->opcode->iclass == ldstnapair_offs
586       || inst->opcode->iclass == ldstpair_off
587       || inst->opcode->iclass == loadlit)
588     {
589       /* size */
590       switch (info->qualifier)
591 	{
592 	case AARCH64_OPND_QLF_S_S: value = 0; break;
593 	case AARCH64_OPND_QLF_S_D: value = 1; break;
594 	case AARCH64_OPND_QLF_S_Q: value = 2; break;
595 	default: assert (0);
596 	}
597       insert_field (FLD_ldst_size, code, value, 0);
598     }
599   else
600     {
601       /* opc[1]:size */
602       value = aarch64_get_qualifier_standard_value (info->qualifier);
603       insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
604     }
605 
606   return TRUE;
607 }
608 
609 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
610 bfd_boolean
611 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 			 const aarch64_opnd_info *info, aarch64_insn *code,
613 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
614 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
615 {
616   /* Rn */
617   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
618   return TRUE;
619 }
620 
621 /* Encode the address operand for e.g.
622      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
623 bfd_boolean
624 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 			 const aarch64_opnd_info *info, aarch64_insn *code,
626 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
627 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
628 {
629   aarch64_insn S;
630   enum aarch64_modifier_kind kind = info->shifter.kind;
631 
632   /* Rn */
633   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
634   /* Rm */
635   insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
636   /* option */
637   if (kind == AARCH64_MOD_LSL)
638     kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
639   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
640   /* S */
641   if (info->qualifier != AARCH64_OPND_QLF_S_B)
642     S = info->shifter.amount != 0;
643   else
644     /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
645        S	<amount>
646        0	[absent]
647        1	#0
648        Must be #0 if <extend> is explicitly LSL.  */
649     S = info->shifter.operator_present && info->shifter.amount_present;
650   insert_field (FLD_S, code, S, 0);
651 
652   return TRUE;
653 }
654 
655 /* Encode the address operand for e.g.
656      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
657 bfd_boolean
658 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 			 const aarch64_opnd_info *info, aarch64_insn *code,
660 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
661 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
662 {
663   /* Rn */
664   insert_field (self->fields[0], code, info->addr.base_regno, 0);
665 
666   /* simm9 */
667   int imm = info->addr.offset.imm;
668   insert_field (self->fields[1], code, imm, 0);
669 
670   /* writeback */
671   if (info->addr.writeback)
672     {
673       assert (info->addr.preind == 1 && info->addr.postind == 0);
674       insert_field (self->fields[2], code, 1, 0);
675     }
676   return TRUE;
677 }
678 
679 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
680 bfd_boolean
681 aarch64_ins_addr_simm (const aarch64_operand *self,
682 		       const aarch64_opnd_info *info,
683 		       aarch64_insn *code,
684 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
685 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
686 {
687   int imm;
688 
689   /* Rn */
690   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
691   /* simm (imm9 or imm7) */
692   imm = info->addr.offset.imm;
693   if (self->fields[0] == FLD_imm7
694      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
695     /* scaled immediate in ld/st pair instructions..  */
696     imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
697   insert_field (self->fields[0], code, imm, 0);
698   /* pre/post- index */
699   if (info->addr.writeback)
700     {
701       assert (inst->opcode->iclass != ldst_unscaled
702 	      && inst->opcode->iclass != ldstnapair_offs
703 	      && inst->opcode->iclass != ldstpair_off
704 	      && inst->opcode->iclass != ldst_unpriv);
705       assert (info->addr.preind != info->addr.postind);
706       if (info->addr.preind)
707 	insert_field (self->fields[1], code, 1, 0);
708     }
709 
710   return TRUE;
711 }
712 
713 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
714 bfd_boolean
715 aarch64_ins_addr_simm10 (const aarch64_operand *self,
716 			 const aarch64_opnd_info *info,
717 			 aarch64_insn *code,
718 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
719 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
720 {
721   int imm;
722 
723   /* Rn */
724   insert_field (self->fields[0], code, info->addr.base_regno, 0);
725   /* simm10 */
726   imm = info->addr.offset.imm >> 3;
727   insert_field (self->fields[1], code, imm >> 9, 0);
728   insert_field (self->fields[2], code, imm, 0);
729   /* writeback */
730   if (info->addr.writeback)
731     {
732       assert (info->addr.preind == 1 && info->addr.postind == 0);
733       insert_field (self->fields[3], code, 1, 0);
734     }
735   return TRUE;
736 }
737 
738 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
739 bfd_boolean
740 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
741 			 const aarch64_opnd_info *info,
742 			 aarch64_insn *code,
743 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
744 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
745 {
746   int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
747 
748   /* Rn */
749   insert_field (self->fields[0], code, info->addr.base_regno, 0);
750   /* uimm12 */
751   insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
752   return TRUE;
753 }
754 
755 /* Encode the address operand for e.g.
756      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
757 bfd_boolean
758 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
759 			    const aarch64_opnd_info *info, aarch64_insn *code,
760 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
761 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
762 {
763   /* Rn */
764   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
765   /* Rm | #<amount>  */
766   if (info->addr.offset.is_reg)
767     insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
768   else
769     insert_field (FLD_Rm, code, 0x1f, 0);
770   return TRUE;
771 }
772 
773 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
774 bfd_boolean
775 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
776 		  const aarch64_opnd_info *info, aarch64_insn *code,
777 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
778 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
779 {
780   /* cond */
781   insert_field (FLD_cond, code, info->cond->value, 0);
782   return TRUE;
783 }
784 
785 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
786 bfd_boolean
787 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
788 		    const aarch64_opnd_info *info, aarch64_insn *code,
789 		    const aarch64_inst *inst,
790 		    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
791 {
792    /* If a system instruction check if we have any restrictions on which
793       registers it can use.  */
794    if (inst->opcode->iclass == ic_system)
795      {
796         uint64_t opcode_flags
797 	  = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
798 	uint32_t sysreg_flags
799 	  = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
800 
801         /* Check to see if it's read-only, else check if it's write only.
802 	   if it's both or unspecified don't care.  */
803 	if (opcode_flags == F_SYS_READ
804 	    && sysreg_flags
805 	    && sysreg_flags != F_REG_READ)
806 	  {
807 		detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
808 		detail->error = _("specified register cannot be read from");
809 		detail->index = info->idx;
810 		detail->non_fatal = TRUE;
811 	  }
812 	else if (opcode_flags == F_SYS_WRITE
813 		 && sysreg_flags
814 		 && sysreg_flags != F_REG_WRITE)
815 	  {
816 		detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
817 		detail->error = _("specified register cannot be written to");
818 		detail->index = info->idx;
819 		detail->non_fatal = TRUE;
820 	  }
821      }
822   /* op0:op1:CRn:CRm:op2 */
823   insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
824 		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
825   return TRUE;
826 }
827 
828 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
829 bfd_boolean
830 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
831 			 const aarch64_opnd_info *info, aarch64_insn *code,
832 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
833 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
834 {
835   /* op1:op2 */
836   insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
837 		 FLD_op2, FLD_op1);
838   return TRUE;
839 }
840 
841 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
842 bfd_boolean
843 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 		       const aarch64_opnd_info *info, aarch64_insn *code,
845 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
847 {
848   /* op1:CRn:CRm:op2 */
849   insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
850 		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
851   return TRUE;
852 }
853 
854 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
855 
856 bfd_boolean
857 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
858 		     const aarch64_opnd_info *info, aarch64_insn *code,
859 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
860 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
861 {
862   /* CRm */
863   insert_field (FLD_CRm, code, info->barrier->value, 0);
864   return TRUE;
865 }
866 
867 /* Encode the prefetch operation option operand for e.g.
868      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
869 
870 bfd_boolean
871 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
872 		   const aarch64_opnd_info *info, aarch64_insn *code,
873 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
874 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
875 {
876   /* prfop in Rt */
877   insert_field (FLD_Rt, code, info->prfop->value, 0);
878   return TRUE;
879 }
880 
881 /* Encode the hint number for instructions that alias HINT but take an
882    operand.  */
883 
884 bfd_boolean
885 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
886 		  const aarch64_opnd_info *info, aarch64_insn *code,
887 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
888 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
889 {
890   /* CRm:op2.  */
891   insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
892   return TRUE;
893 }
894 
895 /* Encode the extended register operand for e.g.
896      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
897 bfd_boolean
898 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
899 			  const aarch64_opnd_info *info, aarch64_insn *code,
900 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
901 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
902 {
903   enum aarch64_modifier_kind kind;
904 
905   /* Rm */
906   insert_field (FLD_Rm, code, info->reg.regno, 0);
907   /* option */
908   kind = info->shifter.kind;
909   if (kind == AARCH64_MOD_LSL)
910     kind = info->qualifier == AARCH64_OPND_QLF_W
911       ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
912   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
913   /* imm3 */
914   insert_field (FLD_imm3, code, info->shifter.amount, 0);
915 
916   return TRUE;
917 }
918 
919 /* Encode the shifted register operand for e.g.
920      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
921 bfd_boolean
922 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
923 			 const aarch64_opnd_info *info, aarch64_insn *code,
924 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
925 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
926 {
927   /* Rm */
928   insert_field (FLD_Rm, code, info->reg.regno, 0);
929   /* shift */
930   insert_field (FLD_shift, code,
931 		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
932   /* imm6 */
933   insert_field (FLD_imm6, code, info->shifter.amount, 0);
934 
935   return TRUE;
936 }
937 
938 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
939    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
940    SELF's operand-dependent value.  fields[0] specifies the field that
941    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
942 bfd_boolean
943 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
944 			       const aarch64_opnd_info *info,
945 			       aarch64_insn *code,
946 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
947 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
948 {
949   int factor = 1 + get_operand_specific_data (self);
950   insert_field (self->fields[0], code, info->addr.base_regno, 0);
951   insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
952   return TRUE;
953 }
954 
955 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
956    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
957    SELF's operand-dependent value.  fields[0] specifies the field that
958    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
959 bfd_boolean
960 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
961 			       const aarch64_opnd_info *info,
962 			       aarch64_insn *code,
963 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
964 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
965 {
966   int factor = 1 + get_operand_specific_data (self);
967   insert_field (self->fields[0], code, info->addr.base_regno, 0);
968   insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
969   return TRUE;
970 }
971 
972 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
973    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
974    SELF's operand-dependent value.  fields[0] specifies the field that
975    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
976    and imm3 fields, with imm3 being the less-significant part.  */
977 bfd_boolean
978 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
979 			       const aarch64_opnd_info *info,
980 			       aarch64_insn *code,
981 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
982 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
983 {
984   int factor = 1 + get_operand_specific_data (self);
985   insert_field (self->fields[0], code, info->addr.base_regno, 0);
986   insert_fields (code, info->addr.offset.imm / factor, 0,
987 		 2, FLD_imm3, FLD_SVE_imm6);
988   return TRUE;
989 }
990 
991 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
992    is a 4-bit signed number and where <shift> is SELF's operand-dependent
993    value.  fields[0] specifies the base register field.  */
994 bfd_boolean
995 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
996 			    const aarch64_opnd_info *info, aarch64_insn *code,
997 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
998 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
999 {
1000   int factor = 1 << get_operand_specific_data (self);
1001   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1002   insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1003   return TRUE;
1004 }
1005 
1006 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1007    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1008    value.  fields[0] specifies the base register field.  */
1009 bfd_boolean
1010 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1011 			    const aarch64_opnd_info *info, aarch64_insn *code,
1012 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1013 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1014 {
1015   int factor = 1 << get_operand_specific_data (self);
1016   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1017   insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1018   return TRUE;
1019 }
1020 
1021 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1022    is SELF's operand-dependent value.  fields[0] specifies the base
1023    register field and fields[1] specifies the offset register field.  */
1024 bfd_boolean
1025 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1026 			     const aarch64_opnd_info *info, aarch64_insn *code,
1027 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1028 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1029 {
1030   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1031   insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1032   return TRUE;
1033 }
1034 
1035 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1036    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1037    base register field, fields[1] specifies the offset register field and
1038    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1039 bfd_boolean
1040 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1041 			     const aarch64_opnd_info *info, aarch64_insn *code,
1042 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1043 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1044 {
1045   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1046   insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1047   if (info->shifter.kind == AARCH64_MOD_UXTW)
1048     insert_field (self->fields[2], code, 0, 0);
1049   else
1050     insert_field (self->fields[2], code, 1, 0);
1051   return TRUE;
1052 }
1053 
1054 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1055    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1056    fields[0] specifies the base register field.  */
1057 bfd_boolean
1058 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1059 			    const aarch64_opnd_info *info, aarch64_insn *code,
1060 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1061 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1062 {
1063   int factor = 1 << get_operand_specific_data (self);
1064   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1065   insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1066   return TRUE;
1067 }
1068 
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" prefix this is an inserter (it calls
   insert_field); the name presumably mirrors the matching extractor in
   the disassembler -- confirm before renaming.  Shared by the LSL, SXTW
   and UXTW wrappers below.  */
static bfd_boolean
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* The shift amount of the fixed modifier goes in msz.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return TRUE;
}
1083 
1084 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1085    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1086    field and fields[1] specifies the offset register field.  */
1087 bfd_boolean
1088 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1089 			     const aarch64_opnd_info *info, aarch64_insn *code,
1090 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1091 			     aarch64_operand_error *errors)
1092 {
1093   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1094 }
1095 
1096 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1097    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1098    field and fields[1] specifies the offset register field.  */
1099 bfd_boolean
1100 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1101 			      const aarch64_opnd_info *info,
1102 			      aarch64_insn *code,
1103 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1104 			      aarch64_operand_error *errors)
1105 {
1106   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1107 }
1108 
1109 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1110    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1111    field and fields[1] specifies the offset register field.  */
1112 bfd_boolean
1113 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1114 			      const aarch64_opnd_info *info,
1115 			      aarch64_insn *code,
1116 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1117 			      aarch64_operand_error *errors)
1118 {
1119   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1120 }
1121 
/* Encode an SVE ADD/SUB immediate.

   The encoded value is a 9-bit quantity: the low 8 bits hold the
   immediate byte and bit 8 (the `| 256' below) selects the shifted
   (LSL #8) form.  Three cases, tried in order:
     - an explicit `, LSL #8' was written: keep the low byte, set bit 8;
     - the value is non-zero with a zero low byte: it is only
       representable shifted, so store value/256 with bit 8 set;
     - otherwise: store the low byte unshifted.  */
bfd_boolean
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return TRUE;
}
1137 
1138 /* Encode an SVE CPY/DUP immediate.  */
1139 bfd_boolean
1140 aarch64_ins_sve_asimm (const aarch64_operand *self,
1141 		       const aarch64_opnd_info *info, aarch64_insn *code,
1142 		       const aarch64_inst *inst,
1143 		       aarch64_operand_error *errors)
1144 {
1145   return aarch64_ins_sve_aimm (self, info, code, inst, errors);
1146 }
1147 
1148 /* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
1149    array specifies which field to use for Zn.  MM is encoded in the
1150    concatenation of imm5 and SVE_tszh, with imm5 being the less
1151    significant part.  */
1152 bfd_boolean
1153 aarch64_ins_sve_index (const aarch64_operand *self,
1154 		       const aarch64_opnd_info *info, aarch64_insn *code,
1155 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1156 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1157 {
1158   unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1159   insert_field (self->fields[0], code, info->reglane.regno, 0);
1160   insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1161 		 2, FLD_imm5, FLD_SVE_tszh);
1162   return TRUE;
1163 }
1164 
1165 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
1166 bfd_boolean
1167 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1168 			  const aarch64_opnd_info *info, aarch64_insn *code,
1169 			  const aarch64_inst *inst,
1170 			  aarch64_operand_error *errors)
1171 {
1172   return aarch64_ins_limm (self, info, code, inst, errors);
1173 }
1174 
1175 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1176    and where MM occupies the most-significant part.  The operand-dependent
1177    value specifies the number of bits in Zn.  */
1178 bfd_boolean
1179 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1180 			    const aarch64_opnd_info *info, aarch64_insn *code,
1181 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1182 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1183 {
1184   unsigned int reg_bits = get_operand_specific_data (self);
1185   assert (info->reglane.regno < (1U << reg_bits));
1186   unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1187   insert_all_fields (self, code, val);
1188   return TRUE;
1189 }
1190 
1191 /* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
1192    to use for Zn.  */
1193 bfd_boolean
1194 aarch64_ins_sve_reglist (const aarch64_operand *self,
1195 			 const aarch64_opnd_info *info, aarch64_insn *code,
1196 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1197 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1198 {
1199   insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1200   return TRUE;
1201 }
1202 
1203 /* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
1204    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
1205    field.  */
1206 bfd_boolean
1207 aarch64_ins_sve_scale (const aarch64_operand *self,
1208 		       const aarch64_opnd_info *info, aarch64_insn *code,
1209 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1210 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1211 {
1212   insert_all_fields (self, code, info->imm.value);
1213   insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1214   return TRUE;
1215 }
1216 
1217 /* Encode an SVE shift left immediate.  */
1218 bfd_boolean
1219 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1220 			const aarch64_opnd_info *info, aarch64_insn *code,
1221 			const aarch64_inst *inst,
1222 			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1223 {
1224   const aarch64_opnd_info *prev_operand;
1225   unsigned int esize;
1226 
1227   assert (info->idx > 0);
1228   prev_operand = &inst->operands[info->idx - 1];
1229   esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1230   insert_all_fields (self, code, 8 * esize + info->imm.value);
1231   return TRUE;
1232 }
1233 
1234 /* Encode an SVE shift right immediate.  */
1235 bfd_boolean
1236 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1237 			const aarch64_opnd_info *info, aarch64_insn *code,
1238 			const aarch64_inst *inst,
1239 			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1240 {
1241   const aarch64_opnd_info *prev_operand;
1242   unsigned int esize;
1243 
1244   assert (info->idx > 0);
1245   prev_operand = &inst->operands[info->idx - 1];
1246   esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1247   insert_all_fields (self, code, 16 * esize - info->imm.value);
1248   return TRUE;
1249 }
1250 
1251 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1252    The fields array specifies which field to use.  */
1253 bfd_boolean
1254 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1255 				const aarch64_opnd_info *info,
1256 				aarch64_insn *code,
1257 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1258 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1259 {
1260   if (info->imm.value == 0x3f000000)
1261     insert_field (self->fields[0], code, 0, 0);
1262   else
1263     insert_field (self->fields[0], code, 1, 0);
1264   return TRUE;
1265 }
1266 
1267 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1268    The fields array specifies which field to use.  */
1269 bfd_boolean
1270 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1271 				const aarch64_opnd_info *info,
1272 				aarch64_insn *code,
1273 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1274 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1275 {
1276   if (info->imm.value == 0x3f000000)
1277     insert_field (self->fields[0], code, 0, 0);
1278   else
1279     insert_field (self->fields[0], code, 1, 0);
1280   return TRUE;
1281 }
1282 
1283 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1284    The fields array specifies which field to use.  */
1285 bfd_boolean
1286 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1287 				const aarch64_opnd_info *info,
1288 				aarch64_insn *code,
1289 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1290 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1291 {
1292   if (info->imm.value == 0)
1293     insert_field (self->fields[0], code, 0, 0);
1294   else
1295     insert_field (self->fields[0], code, 1, 0);
1296   return TRUE;
1297 }
1298 
1299 /* Miscellaneous encoding functions.  */
1300 
/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.

   The qualifier of the full-width (<Ta>) operand decides the bit:
   4S encodes as 0 and 2D as 1.  Which operand is full-width depends
   on the direction of the conversion (narrowing vs. lengthening).  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  /* Only 4S (-> 0) and 2D (-> 1) are valid full-width arrangements.  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1332 
1333 /* Encode size[0], i.e. bit 22, for
1334      e.g. FCVTXN <Vb><d>, <Va><n>.  */
1335 
1336 static void
1337 encode_asisd_fcvtxn (aarch64_inst *inst)
1338 {
1339   aarch64_insn val = 1;
1340   aarch64_field field = {0, 0};
1341   assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1342   gen_sub_field (FLD_size, 0, 1, &field);
1343   insert_field_2 (&field, &inst->value, val, 0);
1344 }
1345 
1346 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
1347 static void
1348 encode_fcvt (aarch64_inst *inst)
1349 {
1350   aarch64_insn val;
1351   const aarch64_field field = {15, 2};
1352 
1353   /* opc dstsize */
1354   switch (inst->operands[0].qualifier)
1355     {
1356     case AARCH64_OPND_QLF_S_S: val = 0; break;
1357     case AARCH64_OPND_QLF_S_D: val = 1; break;
1358     case AARCH64_OPND_QLF_S_H: val = 3; break;
1359     default: abort ();
1360     }
1361   insert_field_2 (&field, &inst->value, val, 0);
1362 
1363   return;
1364 }
1365 
1366 /* Return the index in qualifiers_list that INST is using.  Should only
1367    be called once the qualifiers are known to be valid.  */
1368 
1369 static int
1370 aarch64_get_variant (struct aarch64_inst *inst)
1371 {
1372   int i, nops, variant;
1373 
1374   nops = aarch64_num_of_operands (inst->opcode);
1375   for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1376     {
1377       for (i = 0; i < nops; ++i)
1378 	if (inst->opcode->qualifiers_list[variant][i]
1379 	    != inst->operands[i].qualifier)
1380 	  break;
1381       if (i == nops)
1382 	return variant;
1383     }
1384   abort ();
1385 }
1386 
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.

   Most cases here handle aliases (MOV/MOVS/MOVZ/MOVM/NOT/NOTS) whose
   preferred form names a register only once: the field is read back out
   of the partially-built encoding and replicated into the other
   register slot(s) the underlying instruction requires.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* No additional fields to fill in.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1453 
/* Encode the 'size' and 'Q' field for e.g. SHADD.

   The qualifier's standard value packs both: bit 0 is Q and bits 2:1
   are the size.  The load/store-multiple classes keep their size bits
   in a different position, hence the FLD_vldst_size special case.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
     || inst->opcode->iclass == asisdlsep
     || inst->opcode->iclass == asisdlso
     || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1481 
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' bit: 1 for a 64-bit (X/SP) operand, 0 for 32-bit.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes mirror sf into the N bit as well.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomic size bit, derived the same way as 'sf'.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  /* Vector 'size' and 'Q' fields.  */
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* FP 'type' field from a scalar FP qualifier.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar 'size' field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Arrangement specifier <T> encoded as a one-hot pattern in imm5
     plus the Q bit; see the table below.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* GPR width encoded in the Q bit.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Load-signed size: opc<0> is the inverse of the destination width.  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1607 
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      /* Variant index split over the M bit (bit 14) and the size field.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      /* Variant index split over the M bit (bit 16) and the size field.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      /* Single M bit (bit 4) selects the variant.  */
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      /* Variant index maps directly onto the size field.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* The first variant (.H) corresponds to size 1, hence the + 1.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      /* Single sz bit distinguishes .S from .D.  */
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      /* No iclass-driven variant encoding for this instruction.  */
      break;
    }
}
1666 
1667 /* Converters converting an alias opcode instruction to its real form.  */
1668 
1669 /* ROR <Wd>, <Ws>, #<shift>
1670      is equivalent to:
1671    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
1672 static void
1673 convert_ror_to_extr (aarch64_inst *inst)
1674 {
1675   copy_operand_info (inst, 3, 2);
1676   copy_operand_info (inst, 2, 1);
1677 }
1678 
1679 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1680      is equivalent to:
1681    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
1682 static void
1683 convert_xtl_to_shll (aarch64_inst *inst)
1684 {
1685   inst->operands[2].qualifier = inst->operands[1].qualifier;
1686   inst->operands[2].imm.value = 0;
1687 }
1688 
1689 /* Convert
1690      LSR <Xd>, <Xn>, #<shift>
1691    to
1692      UBFM <Xd>, <Xn>, #<shift>, #63.  */
1693 static void
1694 convert_sr_to_bfm (aarch64_inst *inst)
1695 {
1696   inst->operands[3].imm.value =
1697     inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1698 }
1699 
1700 /* Convert MOV to ORR.  */
1701 static void
1702 convert_mov_to_orr (aarch64_inst *inst)
1703 {
1704   /* MOV <Vd>.<T>, <Vn>.<T>
1705      is equivalent to:
1706      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
1707   copy_operand_info (inst, 2, 1);
1708 }
1709 
1710 /* When <imms> >= <immr>, the instruction written:
1711      SBFX <Xd>, <Xn>, #<lsb>, #<width>
1712    is equivalent to:
1713      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
1714 
1715 static void
1716 convert_bfx_to_bfm (aarch64_inst *inst)
1717 {
1718   int64_t lsb, width;
1719 
1720   /* Convert the operand.  */
1721   lsb = inst->operands[2].imm.value;
1722   width = inst->operands[3].imm.value;
1723   inst->operands[2].imm.value = lsb;
1724   inst->operands[3].imm.value = lsb + width - 1;
1725 }
1726 
1727 /* When <imms> < <immr>, the instruction written:
1728      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1729    is equivalent to:
1730      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
1731 
1732 static void
1733 convert_bfi_to_bfm (aarch64_inst *inst)
1734 {
1735   int64_t lsb, width;
1736 
1737   /* Convert the operand.  */
1738   lsb = inst->operands[2].imm.value;
1739   width = inst->operands[3].imm.value;
1740   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1741     {
1742       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1743       inst->operands[3].imm.value = width - 1;
1744     }
1745   else
1746     {
1747       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1748       inst->operands[3].imm.value = width - 1;
1749     }
1750 }
1751 
1752 /* The instruction written:
1753      BFC <Xd>, #<lsb>, #<width>
1754    is equivalent to:
1755      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
1756 
1757 static void
1758 convert_bfc_to_bfm (aarch64_inst *inst)
1759 {
1760   int64_t lsb, width;
1761 
1762   /* Insert XZR.  */
1763   copy_operand_info (inst, 3, 2);
1764   copy_operand_info (inst, 2, 1);
1765   copy_operand_info (inst, 1, 0);
1766   inst->operands[1].reg.regno = 0x1f;
1767 
1768   /* Convert the immediate operand.  */
1769   lsb = inst->operands[2].imm.value;
1770   width = inst->operands[3].imm.value;
1771   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1772     {
1773       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1774       inst->operands[3].imm.value = width - 1;
1775     }
1776   else
1777     {
1778       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1779       inst->operands[3].imm.value = width - 1;
1780     }
1781 }
1782 
1783 /* The instruction written:
1784      LSL <Xd>, <Xn>, #<shift>
1785    is equivalent to:
1786      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
1787 
1788 static void
1789 convert_lsl_to_ubfm (aarch64_inst *inst)
1790 {
1791   int64_t shift = inst->operands[2].imm.value;
1792 
1793   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1794     {
1795       inst->operands[2].imm.value = (32 - shift) & 0x1f;
1796       inst->operands[3].imm.value = 31 - shift;
1797     }
1798   else
1799     {
1800       inst->operands[2].imm.value = (64 - shift) & 0x3f;
1801       inst->operands[3].imm.value = 63 - shift;
1802     }
1803 }
1804 
/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Duplicate <Wn> into the second source slot — the 3<-2 copy must
     precede 2<-1 so the condition operand is not clobbered — then
     invert the condition.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1816 
/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3, fill slots 1 and 2 from the
     destination operand, then overwrite them with WZR/XZR (register
     number 31) and invert the condition.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1831 
1832 /* MOV <Wd>, #<imm>
1833    is equivalent to:
1834    MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
1835 
1836 static void
1837 convert_mov_to_movewide (aarch64_inst *inst)
1838 {
1839   int is32;
1840   uint32_t shift_amount;
1841   uint64_t value;
1842 
1843   switch (inst->opcode->op)
1844     {
1845     case OP_MOV_IMM_WIDE:
1846       value = inst->operands[1].imm.value;
1847       break;
1848     case OP_MOV_IMM_WIDEN:
1849       value = ~inst->operands[1].imm.value;
1850       break;
1851     default:
1852       assert (0);
1853     }
1854   inst->operands[1].type = AARCH64_OPND_HALF;
1855   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1856   if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1857     /* The constraint check should have guaranteed this wouldn't happen.  */
1858     assert (0);
1859   value >>= shift_amount;
1860   value &= 0xffff;
1861   inst->operands[1].imm.value = value;
1862   inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1863   inst->operands[1].shifter.amount = shift_amount;
1864 }
1865 
/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the logical immediate to slot 2 and insert WZR/XZR (register
     number 31) as the source register; clear `skip' so the inserted
     operand is encoded.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1877 
1878 /* Some alias opcodes are assembled by being converted to their real-form.  */
1879 
1880 static void
1881 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1882 {
1883   const aarch64_opcode *alias = inst->opcode;
1884 
1885   if ((alias->flags & F_CONV) == 0)
1886     goto convert_to_real_return;
1887 
1888   switch (alias->op)
1889     {
1890     case OP_ASR_IMM:
1891     case OP_LSR_IMM:
1892       convert_sr_to_bfm (inst);
1893       break;
1894     case OP_LSL_IMM:
1895       convert_lsl_to_ubfm (inst);
1896       break;
1897     case OP_CINC:
1898     case OP_CINV:
1899     case OP_CNEG:
1900       convert_to_csel (inst);
1901       break;
1902     case OP_CSET:
1903     case OP_CSETM:
1904       convert_cset_to_csinc (inst);
1905       break;
1906     case OP_UBFX:
1907     case OP_BFXIL:
1908     case OP_SBFX:
1909       convert_bfx_to_bfm (inst);
1910       break;
1911     case OP_SBFIZ:
1912     case OP_BFI:
1913     case OP_UBFIZ:
1914       convert_bfi_to_bfm (inst);
1915       break;
1916     case OP_BFC:
1917       convert_bfc_to_bfm (inst);
1918       break;
1919     case OP_MOV_V:
1920       convert_mov_to_orr (inst);
1921       break;
1922     case OP_MOV_IMM_WIDE:
1923     case OP_MOV_IMM_WIDEN:
1924       convert_mov_to_movewide (inst);
1925       break;
1926     case OP_MOV_IMM_LOG:
1927       convert_mov_to_movebitmask (inst);
1928       break;
1929     case OP_ROR_IMM:
1930       convert_ror_to_extr (inst);
1931       break;
1932     case OP_SXTL:
1933     case OP_SXTL2:
1934     case OP_UXTL:
1935     case OP_UXTL2:
1936       convert_xtl_to_shll (inst);
1937       break;
1938     default:
1939       break;
1940     }
1941 
1942 convert_to_real_return:
1943   aarch64_replace_opcode (inst, real);
1944 }
1945 
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   Return TRUE on success; on failure return FALSE with the reason, if
   available, recorded in *MISMATCH_DETAIL.  */

bfd_boolean
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	/* NOTE(review): qlf_seq is not advanced here, so each iteration
	   overwrites the same slot and only the last operand's qualifier
	   survives — confirm whether callers rely on more than that.  */
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias  %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	    return FALSE;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      /* NOTE(review): *code is passed here before it is written at
	 encoding_exit — the verifier presumably ignores it on the
	 encoding path (is_optional == TRUE); verify against the
	 verifier implementations.  */
      enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return FALSE;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, TRUE,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return FALSE;
    default:
      break;
    }


encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  /* Publish the fully assembled instruction word.  */
  *code = inst->value;

  return TRUE;
}
2081