xref: /netbsd-src/external/gpl3/gdb/dist/opcodes/aarch64-asm.c (revision 0bc1daf3bf4bb0e072916aa468ba4ba965b544bb)
1 /* aarch64-asm.c -- AArch64 assembler support.
2    Copyright (C) 2012-2024 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26 
27 /* Utilities.  */
28 
29 /* The unnamed arguments consist of the number of fields and information about
30    these fields where the VALUE will be inserted into CODE.  MASK can be zero or
31    the base mask of the opcode.
32 
   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
35     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36    is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37    the order of M, L, H.  */
38 
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42   uint32_t num;
43   const aarch64_field *field;
44   enum aarch64_field_kind kind;
45   va_list va;
46 
47   va_start (va, mask);
48   num = va_arg (va, uint32_t);
49   assert (num <= 5);
50   while (num--)
51     {
52       kind = va_arg (va, enum aarch64_field_kind);
53       field = &fields[kind];
54       insert_field (kind, code, value, mask);
55       value >>= field->width;
56     }
57   va_end (va);
58 }
59 
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61    The least significant bit goes in the final field.  */
62 
63 static void
64 insert_all_fields_after (const aarch64_operand *self, unsigned int start,
65 			 aarch64_insn *code, aarch64_insn value)
66 {
67   unsigned int i;
68   enum aarch64_field_kind kind;
69 
70   for (i = ARRAY_SIZE (self->fields); i-- > start; )
71     if (self->fields[i] != FLD_NIL)
72       {
73 	kind = self->fields[i];
74 	insert_field (kind, code, value, 0);
75 	value >>= fields[kind].width;
76       }
77 }
78 
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80    The least significant bit goes in the final field.  */
81 
82 static void
83 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
84 		   aarch64_insn value)
85 {
86   return insert_all_fields_after (self, 0, code, value);
87 }
88 
89 /* Operand inserters.  */
90 
/* Insert nothing -- used for operands that contribute no bits to the
   instruction encoding.  Always succeeds.  */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  aarch64_insn *code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}
101 
102 /* Insert register number.  */
103 bool
104 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
105 		   aarch64_insn *code,
106 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
107 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
108 {
109   int val = info->reg.regno - get_operand_specific_data (self);
110   insert_field (self->fields[0], code, val, 0);
111   return true;
112 }
113 
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].

   The encoding of the element index depends on the instruction class and
   on the element-size qualifier; see the per-branch comments below.  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the position of the element-size marker bit inside imm5 /
	 imm4: 0 for B, 1 for H, 2 for S, 3 for D, relying on the S_B..S_D
	 qualifiers being consecutive enumerators.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4_11, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  /* The "| 1" plants the size marker bit below the index bits.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* How many of H:L:M encode the index depends on the element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  return false;
	}
    }
  return true;
}
204 
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
206 bool
207 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
208 		     aarch64_insn *code,
209 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
210 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
211 {
212   /* R */
213   insert_field (self->fields[0], code, info->reglist.first_regno, 0);
214   /* len */
215   insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
216   return true;
217 }
218 
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220    in AdvSIMD load/store instructions.  */
221 bool
222 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
223 			  const aarch64_opnd_info *info, aarch64_insn *code,
224 			  const aarch64_inst *inst,
225 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
226 {
227   aarch64_insn value = 0;
228   /* Number of elements in each structure to be loaded/stored.  */
229   unsigned num = get_opcode_dependent_value (inst->opcode);
230 
231   /* Rt */
232   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
233   /* opcode */
234   switch (num)
235     {
236     case 1:
237       switch (info->reglist.num_regs)
238 	{
239 	case 1: value = 0x7; break;
240 	case 2: value = 0xa; break;
241 	case 3: value = 0x6; break;
242 	case 4: value = 0x2; break;
243 	default: return false;
244 	}
245       break;
246     case 2:
247       value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
248       break;
249     case 3:
250       value = 0x4;
251       break;
252     case 4:
253       value = 0x0;
254       break;
255     default:
256       return false;
257     }
258   insert_field (FLD_opcode, code, value, 0);
259 
260   return true;
261 }
262 
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264    single structure to all lanes instructions.  */
265 bool
266 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
267 			    const aarch64_opnd_info *info, aarch64_insn *code,
268 			    const aarch64_inst *inst,
269 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
270 {
271   aarch64_insn value;
272   /* The opcode dependent area stores the number of elements in
273      each structure to be loaded/stored.  */
274   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
275 
276   /* Rt */
277   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
278   /* S */
279   value = (aarch64_insn) 0;
280   if (is_ld1r && info->reglist.num_regs == 2)
281     /* OP_LD1R does not have alternating variant, but have "two consecutive"
282        instead.  */
283     value = (aarch64_insn) 1;
284   insert_field (FLD_S, code, value, 0);
285 
286   return true;
287 }
288 
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290    operand e.g. Vt in AdvSIMD load/store single element instructions.  */
291 bool
292 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
293 			   const aarch64_opnd_info *info, aarch64_insn *code,
294 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
295 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
296 {
297   aarch64_field field = {0, 0};
298   aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
299   aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */
300 
301   assert (info->reglist.has_index);
302 
303   /* Rt */
304   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
305   /* Encode the index, opcode<2:1> and size.  */
306   switch (info->qualifier)
307     {
308     case AARCH64_OPND_QLF_S_B:
309       /* Index encoded in "Q:S:size".  */
310       QSsize = info->reglist.index;
311       opcodeh2 = 0x0;
312       break;
313     case AARCH64_OPND_QLF_S_H:
314       /* Index encoded in "Q:S:size<1>".  */
315       QSsize = info->reglist.index << 1;
316       opcodeh2 = 0x1;
317       break;
318     case AARCH64_OPND_QLF_S_S:
319       /* Index encoded in "Q:S".  */
320       QSsize = info->reglist.index << 2;
321       opcodeh2 = 0x2;
322       break;
323     case AARCH64_OPND_QLF_S_D:
324       /* Index encoded in "Q".  */
325       QSsize = info->reglist.index << 3 | 0x1;
326       opcodeh2 = 0x2;
327       break;
328     default:
329       return false;
330     }
331   insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
332   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
333   insert_field_2 (&field, code, opcodeh2, 0);
334 
335   return true;
336 }
337 
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      /* Bit 0 of the standard qualifier value selects Q; the remaining
	 bits feed the immh-based shift computation below.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
393 
394 /* Insert fields for e.g. the immediate operands in
395    BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
396 bool
397 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
398 		 aarch64_insn *code,
399 		 const aarch64_inst *inst ATTRIBUTE_UNUSED,
400 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
401 {
402   int64_t imm;
403 
404   imm = info->imm.value;
405   if (operand_need_shift_by_two (self))
406     imm >>= 2;
407   if (operand_need_shift_by_three (self))
408     imm >>= 3;
409   if (operand_need_shift_by_four (self))
410     imm >>= 4;
411   insert_all_fields (self, code, imm);
412   return true;
413 }
414 
415 /* Insert immediate and its shift amount for e.g. the last operand in
416      MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
417 bool
418 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
419 		      aarch64_insn *code, const aarch64_inst *inst,
420 		      aarch64_operand_error *errors)
421 {
422   /* imm16 */
423   aarch64_ins_imm (self, info, code, inst, errors);
424   /* hw */
425   insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
426   return true;
427 }
428 
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
431 bool
432 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
433 				  const aarch64_opnd_info *info,
434 				  aarch64_insn *code,
435 				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
436 				  aarch64_operand_error *errors
437 					ATTRIBUTE_UNUSED)
438 {
439   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
440   uint64_t imm = info->imm.value;
441   enum aarch64_modifier_kind kind = info->shifter.kind;
442   int amount = info->shifter.amount;
443   aarch64_field field = {0, 0};
444 
445   /* a:b:c:d:e:f:g:h */
446   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
447     {
448       /* Either MOVI <Dd>, #<imm>
449 	 or     MOVI <Vd>.2D, #<imm>.
450 	 <imm> is a 64-bit immediate
451 	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
452 	 encoded in "a:b:c:d:e:f:g:h".	*/
453       imm = aarch64_shrink_expanded_imm8 (imm);
454       assert ((int)imm >= 0);
455     }
456   insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
457 
458   if (kind == AARCH64_MOD_NONE)
459     return true;
460 
461   /* shift amount partially in cmode */
462   assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
463   if (kind == AARCH64_MOD_LSL)
464     {
465       /* AARCH64_MOD_LSL: shift zeros.  */
466       int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
467       assert (esize == 4 || esize == 2 || esize == 1);
468       /* For 8-bit move immediate, the optional LSL #0 does not require
469 	 encoding.  */
470       if (esize == 1)
471 	return true;
472       amount >>= 3;
473       if (esize == 4)
474 	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
475       else
476 	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
477     }
478   else
479     {
480       /* AARCH64_MOD_MSL: shift ones.  */
481       amount >>= 4;
482       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
483     }
484   insert_field_2 (&field, code, amount, 0);
485 
486   return true;
487 }
488 
/* Insert fields for an 8-bit floating-point immediate.  */
bool
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Scatter the immediate payload across all of the operand's fields;
     assumes info->imm.value already holds the encoded 8-bit form (set up
     during operand parsing -- TODO confirm against the parser).  */
  insert_all_fields (self, code, info->imm.value);
  return true;
}
499 
500 /* Insert 1-bit rotation immediate (#90 or #270).  */
501 bool
502 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
503 			 const aarch64_opnd_info *info,
504 			 aarch64_insn *code, const aarch64_inst *inst,
505 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
506 {
507   uint64_t rot = (info->imm.value - 90) / 180;
508   assert (rot < 2U);
509   insert_field (self->fields[0], code, rot, inst->opcode->mask);
510   return true;
511 }
512 
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
514 bool
515 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
516 			 const aarch64_opnd_info *info,
517 			 aarch64_insn *code, const aarch64_inst *inst,
518 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
519 {
520   uint64_t rot = info->imm.value / 90;
521   assert (rot < 4U);
522   insert_field (self->fields[0], code, rot, inst->opcode->mask);
523   return true;
524 }
525 
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527    e.g.  SCVTF <Dd>, <Wn>, #<fbits>.  */
528 bool
529 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
530 		   aarch64_insn *code,
531 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
532 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
533 {
534   insert_field (self->fields[0], code, 64 - info->imm.value, 0);
535   return true;
536 }
537 
538 /* Insert arithmetic immediate for e.g. the last operand in
539      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
540 bool
541 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
542 		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
543 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
544 {
545   /* shift */
546   aarch64_insn value = info->shifter.amount ? 1 : 0;
547   insert_field (self->fields[0], code, value, 0);
548   /* imm12 (unsigned) */
549   insert_field (self->fields[1], code, info->imm.value, 0);
550   return true;
551 }
552 
553 /* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
554    the operand should be inverted before encoding.  */
555 static bool
556 aarch64_ins_limm_1 (const aarch64_operand *self,
557 		    const aarch64_opnd_info *info, aarch64_insn *code,
558 		    const aarch64_inst *inst, bool invert_p,
559 		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
560 {
561   bool res;
562   aarch64_insn value;
563   uint64_t imm = info->imm.value;
564   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
565 
566   if (invert_p)
567     imm = ~imm;
568   /* The constraint check should guarantee that this will work.  */
569   res = aarch64_logical_immediate_p (imm, esize, &value);
570   if (res)
571     insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
572 		   self->fields[0]);
573   return res;
574 }
575 
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577      ORR <Wd|WSP>, <Wn>, #<imm>.  */
578 bool
579 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
580 		  aarch64_insn *code, const aarch64_inst *inst,
581 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582 {
583   return aarch64_ins_limm_1 (self, info, code, inst,
584 			     inst->opcode->op == OP_BIC, errors);
585 }
586 
/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.);
   the immediate is always bitwise-inverted before encoding.  */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}
596 
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
599 bool
600 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
601 		aarch64_insn *code, const aarch64_inst *inst,
602 		aarch64_operand_error *errors)
603 {
604   aarch64_insn value = 0;
605 
606   assert (info->idx == 0);
607 
608   /* Rt */
609   aarch64_ins_regno (self, info, code, inst, errors);
610   if (inst->opcode->iclass == ldstpair_indexed
611       || inst->opcode->iclass == ldstnapair_offs
612       || inst->opcode->iclass == ldstpair_off
613       || inst->opcode->iclass == loadlit)
614     {
615       /* size */
616       switch (info->qualifier)
617 	{
618 	case AARCH64_OPND_QLF_S_S: value = 0; break;
619 	case AARCH64_OPND_QLF_S_D: value = 1; break;
620 	case AARCH64_OPND_QLF_S_Q: value = 2; break;
621 	default: return false;
622 	}
623       insert_field (FLD_ldst_size, code, value, 0);
624     }
625   else
626     {
627       /* opc[1]:size */
628       value = aarch64_get_qualifier_standard_value (info->qualifier);
629       insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
630     }
631 
632   return true;
633 }
634 
/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn: only the base register is encoded; the offset is always #0.  */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}
646 
647 /* Encode the address operand for e.g.
648      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
649 bool
650 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
651 			 const aarch64_opnd_info *info, aarch64_insn *code,
652 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
653 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
654 {
655   aarch64_insn S;
656   enum aarch64_modifier_kind kind = info->shifter.kind;
657 
658   /* Rn */
659   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
660   /* Rm */
661   insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
662   /* option */
663   if (kind == AARCH64_MOD_LSL)
664     kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
665   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
666   /* S */
667   if (info->qualifier != AARCH64_OPND_QLF_S_B)
668     S = info->shifter.amount != 0;
669   else
670     /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
671        S	<amount>
672        0	[absent]
673        1	#0
674        Must be #0 if <extend> is explicitly LSL.  */
675     S = info->shifter.operator_present && info->shifter.amount_present;
676   insert_field (FLD_S, code, S, 0);
677 
678   return true;
679 }
680 
681 /* Encode the address operand for e.g.
682      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
683 bool
684 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
685 			 const aarch64_opnd_info *info, aarch64_insn *code,
686 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
687 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
688 {
689   /* Rn */
690   insert_field (self->fields[0], code, info->addr.base_regno, 0);
691 
692   /* simm9 */
693   int imm = info->addr.offset.imm;
694   insert_field (self->fields[1], code, imm, 0);
695 
696   /* writeback */
697   if (info->addr.writeback)
698     {
699       assert (info->addr.preind == 1 && info->addr.postind == 0);
700       insert_field (self->fields[2], code, 1, 0);
701     }
702   return true;
703 }
704 
705 /* Encode the address operand for e.g.
706      stlur <Xt>, [<Xn|SP>{, <amount>}].  */
707 bool
708 aarch64_ins_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 			       const aarch64_opnd_info *info, aarch64_insn *code,
710 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
711 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
712 {
713   /* Rn */
714   insert_field (self->fields[0], code, info->addr.base_regno, 0);
715 
716   /* simm9 */
717   int imm = info->addr.offset.imm;
718   insert_field (self->fields[1], code, imm, 0);
719 
720   return true;
721 }
722 
723 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
724 bool
725 aarch64_ins_addr_simm (const aarch64_operand *self,
726 		       const aarch64_opnd_info *info,
727 		       aarch64_insn *code,
728 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
729 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
730 {
731   int imm;
732 
733   /* Rn */
734   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
735   /* simm (imm9 or imm7) */
736   imm = info->addr.offset.imm;
737   if (self->fields[0] == FLD_imm7
738      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
739     /* scaled immediate in ld/st pair instructions..  */
740     imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
741   insert_field (self->fields[0], code, imm, 0);
742   /* pre/post- index */
743   if (info->addr.writeback)
744     {
745       assert (inst->opcode->iclass != ldst_unscaled
746 	      && inst->opcode->iclass != ldstnapair_offs
747 	      && inst->opcode->iclass != ldstpair_off
748 	      && inst->opcode->iclass != ldst_unpriv);
749       assert (info->addr.preind != info->addr.postind);
750       if (info->addr.preind)
751 	insert_field (self->fields[1], code, 1, 0);
752     }
753 
754   return true;
755 }
756 
757 /* Encode the address operand, potentially offset by the load/store ammount,
758    e.g. LDIAPP <Xt>, <Xt2> [<Xn|SP>, #<simm>]
759    and  STILP  <Xt>, <Xt2> [<Xn|SP>], #<simm>.*/
760 bool
761 aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
762 				   const aarch64_opnd_info *info,
763 				   aarch64_insn *code,
764 				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
765 				   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
766 {
767   int imm;
768 
769   /* Rn */
770   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
771   /* simm */
772   imm = info->addr.offset.imm;
773   if (!imm)
774     insert_field (FLD_opc2, code, 1, 0);
775 
776   return true;
777 }
778 
779 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
780 bool
781 aarch64_ins_addr_simm10 (const aarch64_operand *self,
782 			 const aarch64_opnd_info *info,
783 			 aarch64_insn *code,
784 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
785 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
786 {
787   int imm;
788 
789   /* Rn */
790   insert_field (self->fields[0], code, info->addr.base_regno, 0);
791   /* simm10 */
792   imm = info->addr.offset.imm >> 3;
793   insert_field (self->fields[1], code, imm >> 9, 0);
794   insert_field (self->fields[2], code, imm, 0);
795   /* writeback */
796   if (info->addr.writeback)
797     {
798       assert (info->addr.preind == 1 && info->addr.postind == 0);
799       insert_field (self->fields[3], code, 1, 0);
800     }
801   return true;
802 }
803 
804 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
805 bool
806 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
807 			 const aarch64_opnd_info *info,
808 			 aarch64_insn *code,
809 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
810 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
811 {
812   int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
813 
814   /* Rn */
815   insert_field (self->fields[0], code, info->addr.base_regno, 0);
816   /* uimm12 */
817   insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
818   return true;
819 }
820 
821 /* Encode the address operand for e.g.
822      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
823 bool
824 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
825 			    const aarch64_opnd_info *info, aarch64_insn *code,
826 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
827 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
828 {
829   /* Rn */
830   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
831   /* Rm | #<amount>  */
832   if (info->addr.offset.is_reg)
833     insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
834   else
835     insert_field (FLD_Rm, code, 0x1f, 0);
836   return true;
837 }
838 
/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond: the encoding value comes from the condition descriptor.  */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}
850 
851 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
852 bool
853 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
854 		    const aarch64_opnd_info *info, aarch64_insn *code,
855 		    const aarch64_inst *inst,
856 		    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
857 {
858    /* If a system instruction check if we have any restrictions on which
859       registers it can use.  */
860    if (inst->opcode->iclass == ic_system)
861      {
862         uint64_t opcode_flags
863 	  = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
864 	uint32_t sysreg_flags
865 	  = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
866 
867         /* Check to see if it's read-only, else check if it's write only.
868 	   if it's both or unspecified don't care.  */
869 	if (opcode_flags == F_SYS_READ
870 	    && sysreg_flags
871 	    && sysreg_flags != F_REG_READ)
872 	  {
873 		detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
874 		detail->error = _("specified register cannot be read from");
875 		detail->index = info->idx;
876 		detail->non_fatal = true;
877 	  }
878 	else if (opcode_flags == F_SYS_WRITE
879 		 && sysreg_flags
880 		 && sysreg_flags != F_REG_WRITE)
881 	  {
882 		detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
883 		detail->error = _("specified register cannot be written to");
884 		detail->index = info->idx;
885 		detail->non_fatal = true;
886 	  }
887      }
888   /* op0:op1:CRn:CRm:op2 */
889   insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
890 		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
891   return true;
892 }
893 
894 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
895 bool
896 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
897 			 const aarch64_opnd_info *info, aarch64_insn *code,
898 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
899 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
900 {
901   /* op1:op2 */
902   insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
903 		 FLD_op2, FLD_op1);
904 
905   /* Extra CRm mask.  */
906   if (info->sysreg.flags | F_REG_IN_CRM)
907     insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
908   return true;
909 }
910 
/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 -- fields are listed least significant first, as
     insert_fields consumes the value from the low bits upwards.  */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}
923 
924 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
925 
926 bool
927 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
928 		     const aarch64_opnd_info *info, aarch64_insn *code,
929 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
930 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
931 {
932   /* CRm */
933   insert_field (FLD_CRm, code, info->barrier->value, 0);
934   return true;
935 }
936 
937 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>.  */
938 
939 bool
940 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
941 		     const aarch64_opnd_info *info, aarch64_insn *code,
942 		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
943 		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
944 {
945   /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
946      encoded in CRm<3:2>.  */
947   aarch64_insn value = (info->barrier->value >> 2) - 4;
948   insert_field (FLD_CRm_dsb_nxs, code, value, 0);
949   return true;
950 }
951 
952 /* Encode the prefetch operation option operand for e.g.
953      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
954 
955 bool
956 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
957 		   const aarch64_opnd_info *info, aarch64_insn *code,
958 		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
959 		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
960 {
961   /* prfop in Rt */
962   insert_field (FLD_Rt, code, info->prfop->value, 0);
963   return true;
964 }
965 
966 /* Encode the hint number for instructions that alias HINT but take an
967    operand.  */
968 
969 bool
970 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
971 		  const aarch64_opnd_info *info, aarch64_insn *code,
972 		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
973 		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
974 {
975   /* CRm:op2.  */
976   insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
977   return true;
978 }
979 
980 /* Encode the extended register operand for e.g.
981      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
982 bool
983 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
984 			  const aarch64_opnd_info *info, aarch64_insn *code,
985 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
986 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
987 {
988   enum aarch64_modifier_kind kind;
989 
990   /* Rm */
991   insert_field (FLD_Rm, code, info->reg.regno, 0);
992   /* option */
993   kind = info->shifter.kind;
994   if (kind == AARCH64_MOD_LSL)
995     kind = info->qualifier == AARCH64_OPND_QLF_W
996       ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
997   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
998   /* imm3 */
999   insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
1000 
1001   return true;
1002 }
1003 
1004 /* Encode the shifted register operand for e.g.
1005      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1006 bool
1007 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1008 			 const aarch64_opnd_info *info, aarch64_insn *code,
1009 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1010 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1011 {
1012   /* Rm */
1013   insert_field (FLD_Rm, code, info->reg.regno, 0);
1014   /* shift */
1015   insert_field (FLD_shift, code,
1016 		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
1017   /* imm6 */
1018   insert_field (FLD_imm6_10, code, info->shifter.amount, 0);
1019 
1020   return true;
1021 }
1022 
1023 /* Encode the LSL-shifted register operand for e.g.
1024      ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}.  */
1025 bool
1026 aarch64_ins_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1027 			     const aarch64_opnd_info *info, aarch64_insn *code,
1028 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1029 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1030 {
1031   /* Rm */
1032   insert_field (FLD_Rm, code, info->reg.regno, 0);
1033   /* imm3 */
1034   insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
1035   return true;
1036 }
1037 
1038 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1039    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1040    SELF's operand-dependent value.  fields[0] specifies the field that
1041    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
1042 bool
1043 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
1044 			       const aarch64_opnd_info *info,
1045 			       aarch64_insn *code,
1046 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1047 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1048 {
1049   int factor = 1 + get_operand_specific_data (self);
1050   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1051   insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1052   return true;
1053 }
1054 
1055 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1056    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1057    SELF's operand-dependent value.  fields[0] specifies the field that
1058    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
1059 bool
1060 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
1061 			       const aarch64_opnd_info *info,
1062 			       aarch64_insn *code,
1063 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1064 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1065 {
1066   int factor = 1 + get_operand_specific_data (self);
1067   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1068   insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1069   return true;
1070 }
1071 
1072 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1073    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1074    SELF's operand-dependent value.  fields[0] specifies the field that
1075    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
1076    and imm3 fields, with imm3 being the less-significant part.  */
1077 bool
1078 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1079 			       const aarch64_opnd_info *info,
1080 			       aarch64_insn *code,
1081 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1082 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1083 {
1084   int factor = 1 + get_operand_specific_data (self);
1085   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1086   insert_fields (code, info->addr.offset.imm / factor, 0,
1087 		 2, FLD_imm3_10, FLD_SVE_imm6);
1088   return true;
1089 }
1090 
1091 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1092    is a 4-bit signed number and where <shift> is SELF's operand-dependent
1093    value.  fields[0] specifies the base register field.  */
1094 bool
1095 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1096 			    const aarch64_opnd_info *info, aarch64_insn *code,
1097 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1098 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1099 {
1100   int factor = 1 << get_operand_specific_data (self);
1101   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1102   insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1103   return true;
1104 }
1105 
1106 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1107    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1108    value.  fields[0] specifies the base register field.  */
1109 bool
1110 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1111 			    const aarch64_opnd_info *info, aarch64_insn *code,
1112 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1113 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1114 {
1115   int factor = 1 << get_operand_specific_data (self);
1116   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1117   insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1118   return true;
1119 }
1120 
1121 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1122    is SELF's operand-dependent value.  fields[0] specifies the base
1123    register field and fields[1] specifies the offset register field.  */
1124 bool
1125 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1126 			     const aarch64_opnd_info *info, aarch64_insn *code,
1127 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1128 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1129 {
1130   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1131   insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1132   return true;
1133 }
1134 
1135 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1136    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1137    base register field, fields[1] specifies the offset register field and
1138    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1139 bool
1140 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1141 			     const aarch64_opnd_info *info, aarch64_insn *code,
1142 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1143 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1144 {
1145   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1146   insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1147   if (info->shifter.kind == AARCH64_MOD_UXTW)
1148     insert_field (self->fields[2], code, 0, 0);
1149   else
1150     insert_field (self->fields[2], code, 1, 0);
1151   return true;
1152 }
1153 
1154 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1155    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1156    fields[0] specifies the base register field.  */
1157 bool
1158 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1159 			    const aarch64_opnd_info *info, aarch64_insn *code,
1160 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1161 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1162 {
1163   int factor = 1 << get_operand_specific_data (self);
1164   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1165   insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1166   return true;
1167 }
1168 
1169 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1170    where <modifier> is fixed by the instruction and where <msz> is a
1171    2-bit unsigned number.  fields[0] specifies the base register field
1172    and fields[1] specifies the offset register field.  */
1173 static bool
1174 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1175 			 const aarch64_opnd_info *info, aarch64_insn *code,
1176 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1177 {
1178   insert_field (self->fields[0], code, info->addr.base_regno, 0);
1179   insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1180   insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1181   return true;
1182 }
1183 
1184 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1185    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1186    field and fields[1] specifies the offset register field.  */
1187 bool
1188 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1189 			     const aarch64_opnd_info *info, aarch64_insn *code,
1190 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1191 			     aarch64_operand_error *errors)
1192 {
1193   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1194 }
1195 
1196 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1197    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1198    field and fields[1] specifies the offset register field.  */
1199 bool
1200 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1201 			      const aarch64_opnd_info *info,
1202 			      aarch64_insn *code,
1203 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1204 			      aarch64_operand_error *errors)
1205 {
1206   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1207 }
1208 
1209 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1210    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1211    field and fields[1] specifies the offset register field.  */
1212 bool
1213 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1214 			      const aarch64_opnd_info *info,
1215 			      aarch64_insn *code,
1216 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1217 			      aarch64_operand_error *errors)
1218 {
1219   return aarch64_ext_sve_addr_zz (self, info, code, errors);
1220 }
1221 
1222 /* Encode an SVE ADD/SUB immediate.  */
1223 bool
1224 aarch64_ins_sve_aimm (const aarch64_operand *self,
1225 		      const aarch64_opnd_info *info, aarch64_insn *code,
1226 		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1227 		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1228 {
1229   if (info->shifter.amount == 8)
1230     insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1231   else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1232     insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1233   else
1234     insert_all_fields (self, code, info->imm.value & 0xff);
1235   return true;
1236 }
1237 
1238 bool
1239 aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
1240 				 const aarch64_opnd_info *info,
1241 				 aarch64_insn *code,
1242 				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1243 				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1244 {
1245   unsigned int num_regs = get_operand_specific_data (self);
1246   unsigned int val = info->reglist.first_regno;
1247   insert_field (self->fields[0], code, val / num_regs, 0);
1248   return true;
1249 }
1250 
1251 /* Encode an SVE CPY/DUP immediate.  */
1252 bool
1253 aarch64_ins_sve_asimm (const aarch64_operand *self,
1254 		       const aarch64_opnd_info *info, aarch64_insn *code,
1255 		       const aarch64_inst *inst,
1256 		       aarch64_operand_error *errors)
1257 {
1258   return aarch64_ins_sve_aimm (self, info, code, inst, errors);
1259 }
1260 
1261 /* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
1262    array specifies which field to use for Zn.  MM is encoded in the
1263    concatenation of imm5 and SVE_tszh, with imm5 being the less
1264    significant part.  */
1265 bool
1266 aarch64_ins_sve_index (const aarch64_operand *self,
1267 		       const aarch64_opnd_info *info, aarch64_insn *code,
1268 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1269 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1270 {
1271   unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1272   insert_field (self->fields[0], code, info->reglane.regno, 0);
1273   insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1274 		 2, FLD_imm5, FLD_SVE_tszh);
1275   return true;
1276 }
1277 
1278 /* Encode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
1279    than the number of elements in 128 bit, which can encode il:tsz.  */
1280 bool
1281 aarch64_ins_sve_index_imm (const aarch64_operand *self,
1282 			   const aarch64_opnd_info *info, aarch64_insn *code,
1283 			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
1284 			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1285 {
1286   insert_field (self->fields[0], code, info->reglane.regno, 0);
1287   unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1288   insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1289 		 2, self->fields[1],self->fields[2]);
1290   return true;
1291 }
1292 
1293 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
1294 bool
1295 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1296 			  const aarch64_opnd_info *info, aarch64_insn *code,
1297 			  const aarch64_inst *inst,
1298 			  aarch64_operand_error *errors)
1299 {
1300   return aarch64_ins_limm (self, info, code, inst, errors);
1301 }
1302 
1303 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1304    and where MM occupies the most-significant part.  The operand-dependent
1305    value specifies the number of bits in Zn.  */
1306 bool
1307 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1308 			    const aarch64_opnd_info *info, aarch64_insn *code,
1309 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1310 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1311 {
1312   unsigned int reg_bits = get_operand_specific_data (self);
1313   assert (info->reglane.regno < (1U << reg_bits));
1314   unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1315   insert_all_fields (self, code, val);
1316   return true;
1317 }
1318 
1319 /* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
1320    to use for Zn.  */
1321 bool
1322 aarch64_ins_sve_reglist (const aarch64_operand *self,
1323 			 const aarch64_opnd_info *info, aarch64_insn *code,
1324 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1325 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1326 {
1327   insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1328   return true;
1329 }
1330 
/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors
				   ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  /* Bits the first register number is allowed to have set: bit 4 plus
     the low bits below the stride.  ATTRIBUTE_UNUSED because the only
     use is inside assert, which compiles away under NDEBUG.  */
  unsigned int mask ATTRIBUTE_UNUSED = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;
  assert ((val & mask) == val);
  /* fields[0] gets the top bit (register >= 16), fields[1] the rest.  */
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
1350 
1351 /* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
1352    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
1353    field.  */
1354 bool
1355 aarch64_ins_sve_scale (const aarch64_operand *self,
1356 		       const aarch64_opnd_info *info, aarch64_insn *code,
1357 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1358 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1359 {
1360   insert_all_fields (self, code, info->imm.value);
1361   insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1362   return true;
1363 }
1364 
1365 /* Encode an SVE shift left immediate.  */
1366 bool
1367 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1368 			const aarch64_opnd_info *info, aarch64_insn *code,
1369 			const aarch64_inst *inst,
1370 			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1371 {
1372   const aarch64_opnd_info *prev_operand;
1373   unsigned int esize;
1374 
1375   assert (info->idx > 0);
1376   prev_operand = &inst->operands[info->idx - 1];
1377   esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1378   insert_all_fields (self, code, 8 * esize + info->imm.value);
1379   return true;
1380 }
1381 
1382 /* Encode an SVE shift right immediate.  */
1383 bool
1384 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1385 			const aarch64_opnd_info *info, aarch64_insn *code,
1386 			const aarch64_inst *inst,
1387 			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1388 {
1389   const aarch64_opnd_info *prev_operand;
1390   unsigned int esize;
1391 
1392   unsigned int opnd_backshift = get_operand_specific_data (self);
1393   assert (info->idx >= (int)opnd_backshift);
1394   prev_operand = &inst->operands[info->idx - opnd_backshift];
1395   esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1396   insert_all_fields (self, code, 16 * esize - info->imm.value);
1397   return true;
1398 }
1399 
1400 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1401    The fields array specifies which field to use.  */
1402 bool
1403 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1404 				const aarch64_opnd_info *info,
1405 				aarch64_insn *code,
1406 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1407 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1408 {
1409   if (info->imm.value == 0x3f000000)
1410     insert_field (self->fields[0], code, 0, 0);
1411   else
1412     insert_field (self->fields[0], code, 1, 0);
1413   return true;
1414 }
1415 
1416 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1417    The fields array specifies which field to use.  */
1418 bool
1419 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1420 				const aarch64_opnd_info *info,
1421 				aarch64_insn *code,
1422 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1423 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1424 {
1425   if (info->imm.value == 0x3f000000)
1426     insert_field (self->fields[0], code, 0, 0);
1427   else
1428     insert_field (self->fields[0], code, 1, 0);
1429   return true;
1430 }
1431 
1432 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1433    The fields array specifies which field to use.  */
1434 bool
1435 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1436 				const aarch64_opnd_info *info,
1437 				aarch64_insn *code,
1438 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1439 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1440 {
1441   if (info->imm.value == 0)
1442     insert_field (self->fields[0], code, 0, 0);
1443   else
1444     insert_field (self->fields[0], code, 1, 0);
1445   return true;
1446 }
1447 
/* Encode an SME ZA vector-range-select operand (variant 1): the vertical
   flag, the W base register, and a qualifier-dependent mix of ZA register
   number and scaled immediate.  */
bool
aarch64_ins_sme_za_vrs1 (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Index registers are W12-W15; only the low two bits are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v =  info->indexed_za.v;
  /* countm1 + 1 is the number of vectors selected per step, so the
     immediate is stored divided by it.  */
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte elements: no tile number, only the scaled immediate.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      /* Half/word elements: tile number plus scaled immediate.  */
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword elements: only the tile number is encoded.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
1482 
/* Encode an SME ZA vector-range-select operand (variant 2).  Like
   aarch64_ins_sme_za_vrs1 but with a different qualifier grouping: here
   the word qualifier is encoded like doubleword (tile number only).  */
bool
aarch64_ins_sme_za_vrs2 (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Index registers are W12-W15; only the low two bits are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v =  info->indexed_za.v;
  /* countm1 + 1 is the number of vectors selected per step, so the
     immediate is stored divided by it.  */
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte elements: no tile number, only the scaled immediate.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Half elements: tile number plus scaled immediate.  */
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      /* Word/doubleword elements: only the tile number is encoded.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
1517 
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
                             const aarch64_opnd_info *info,
                             aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  /* The vector-select register is one of W12-W15, stored biased.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  /* The tile number and slice immediate share one field; wider element
     sizes mean fewer slices, so the tile number is packed into the
     freed-up high bits of the immediate.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* One byte tile: immediate uses the whole field.  */
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword tiles are distinguished by Q; the field carries only
	 the tile number.  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
1572 
/* Encode a range of SME ZA tile slices: the vector indicator, the biased
   select register (W12-W15), and a combined tile-number/slice-immediate
   field.  The range size comes from the opcode-dependent value.  */
bool
aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
				   const aarch64_opnd_info *info,
				   aarch64_insn *code,
				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
				   aarch64_operand_error *errors
				     ATTRIBUTE_UNUSED)
{
  int ebytes = aarch64_get_qualifier_esize (info->qualifier);
  int range_size = get_opcode_dependent_value (inst->opcode);
  int fld_v = info->indexed_za.v;
  /* Select registers are W12-W15, stored biased by 12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  /* Number of distinct range starts per tile; clamp to 1 so wide
     elements don't divide the field away entirely.  */
  int max_value = 16 / range_size / ebytes;

  if (max_value == 0)
    max_value = 1;

  /* The immediate must be an in-range multiple of the range size.  */
  assert (imm % range_size == 0 && (imm / range_size) < max_value);
  int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
  assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));

  insert_field (self->fields[0], code, fld_v, 0);
  insert_field (self->fields[1], code, fld_rv, 0);
  insert_field (self->fields[2], code, fld_zan_imm, 0);

  return true;
}
1601 
1602 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1603    separated by commas, encoded in the "imm8" field.
1604 
1605    For programmer convenience an assembler must also accept the names of
1606    32-bit, 16-bit and 8-bit element tiles which are converted into the
1607    corresponding set of 64-bit element tiles.
1608 */
1609 bool
1610 aarch64_ins_sme_za_list (const aarch64_operand *self,
1611                          const aarch64_opnd_info *info,
1612                          aarch64_insn *code,
1613                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
1614                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1615 {
1616   int fld_mask = info->imm.value;
1617   insert_field (self->fields[0], code, fld_mask, 0);
1618   return true;
1619 }
1620 
1621 bool
1622 aarch64_ins_sme_za_array (const aarch64_operand *self,
1623                           const aarch64_opnd_info *info,
1624                           aarch64_insn *code,
1625                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
1626                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1627 {
1628   int regno = info->indexed_za.index.regno & 3;
1629   int imm = info->indexed_za.index.imm;
1630   int countm1 = info->indexed_za.index.countm1;
1631   assert (imm % (countm1 + 1) == 0);
1632   insert_field (self->fields[0], code, regno, 0);
1633   insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
1634   return true;
1635 }
1636 
1637 bool
1638 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
1639                                const aarch64_opnd_info *info,
1640                                aarch64_insn *code,
1641                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
1642                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1643 {
1644   int regno = info->addr.base_regno;
1645   int imm = info->addr.offset.imm;
1646   insert_field (self->fields[0], code, regno, 0);
1647   insert_field (self->fields[1], code, imm, 0);
1648   return true;
1649 }
1650 
1651 /* Encode in SMSTART and SMSTOP {SM | ZA } mode.  */
1652 bool
1653 aarch64_ins_sme_sm_za (const aarch64_operand *self,
1654                        const aarch64_opnd_info *info,
1655                        aarch64_insn *code,
1656                        const aarch64_inst *inst ATTRIBUTE_UNUSED,
1657                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1658 {
1659   aarch64_insn fld_crm;
1660   /* Set CRm[3:1] bits.  */
1661   if (info->reg.regno == 's')
1662     fld_crm = 0x02 ; /* SVCRSM.  */
1663   else if (info->reg.regno == 'z')
1664     fld_crm = 0x04; /* SVCRZA.  */
1665   else
1666     return false;
1667 
1668   insert_field (self->fields[0], code, fld_crm, 0);
1669   return true;
1670 }
1671 
1672 /* Encode source scalable predicate register (Pn), name of the index base
1673    register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1674    range 0 to one less than the number of vector elements in a 128-bit vector
1675    register, encoded in "i1:tszh:tszl".
1676 */
1677 bool
1678 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
1679                                      const aarch64_opnd_info *info,
1680                                      aarch64_insn *code,
1681                                      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1682                                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1683 {
1684   int fld_pn = info->indexed_za.regno;
1685   int fld_rm = info->indexed_za.index.regno - 12;
1686   int imm = info->indexed_za.index.imm;
1687   int fld_i1, fld_tszh, fld_tshl;
1688 
1689   insert_field (self->fields[0], code, fld_rm, 0);
1690   insert_field (self->fields[1], code, fld_pn, 0);
1691 
1692   /* Optional element index, defaulting to 0, in the range 0 to one less than
1693      the number of vector elements in a 128-bit vector register, encoded in
1694      "i1:tszh:tszl".
1695 
1696         i1  tszh  tszl  <T>
1697         0   0     000   RESERVED
1698         x   x     xx1   B
1699         x   x     x10   H
1700         x   x     100   S
1701         x   1     000   D
1702   */
1703   switch (info->qualifier)
1704   {
1705     case AARCH64_OPND_QLF_S_B:
1706       /* <imm> is 4 bit value.  */
1707       fld_i1 = (imm >> 3) & 0x1;
1708       fld_tszh = (imm >> 2) & 0x1;
1709       fld_tshl = ((imm << 1) | 0x1) & 0x7;
1710       break;
1711     case AARCH64_OPND_QLF_S_H:
1712       /* <imm> is 3 bit value.  */
1713       fld_i1 = (imm >> 2) & 0x1;
1714       fld_tszh = (imm >> 1) & 0x1;
1715       fld_tshl = ((imm << 2) | 0x2) & 0x7;
1716       break;
1717     case AARCH64_OPND_QLF_S_S:
1718       /* <imm> is 2 bit value.  */
1719       fld_i1 = (imm >> 1) & 0x1;
1720       fld_tszh = imm & 0x1;
1721       fld_tshl = 0x4;
1722       break;
1723     case AARCH64_OPND_QLF_S_D:
1724       /* <imm> is 1 bit value.  */
1725       fld_i1 = imm & 0x1;
1726       fld_tszh = 0x1;
1727       fld_tshl = 0x0;
1728       break;
1729     default:
1730       return false;
1731   }
1732 
1733   insert_field (self->fields[2], code, fld_i1, 0);
1734   insert_field (self->fields[3], code, fld_tszh, 0);
1735   insert_field (self->fields[4], code, fld_tshl, 0);
1736   return true;
1737 }
1738 
1739 /* Insert X0-X30.  Register 31 is unallocated.  */
1740 bool
1741 aarch64_ins_x0_to_x30 (const aarch64_operand *self,
1742 		       const aarch64_opnd_info *info,
1743 		       aarch64_insn *code,
1744 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1745 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1746 {
1747   assert (info->reg.regno <= 30);
1748   insert_field (self->fields[0], code, info->reg.regno, 0);
1749   return true;
1750 }
1751 
1752 /* Insert an indexed register, with the first field being the register
1753    number and the remaining fields being the index.  */
1754 bool
1755 aarch64_ins_simple_index (const aarch64_operand *self,
1756 			  const aarch64_opnd_info *info,
1757 			  aarch64_insn *code,
1758 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1759 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1760 {
1761   int bias = get_operand_specific_data (self);
1762   insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
1763   insert_all_fields_after (self, 1, code, info->reglane.index);
1764   return true;
1765 }
1766 
1767 /* Insert a plain shift-right immediate, when there is only a single
1768    element size.  */
1769 bool
1770 aarch64_ins_plain_shrimm (const aarch64_operand *self,
1771 			  const aarch64_opnd_info *info, aarch64_insn *code,
1772 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1773 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1774 {
1775   unsigned int base = 1 << get_operand_field_width (self, 0);
1776   insert_field (self->fields[0], code, base - info->imm.value, 0);
1777   return true;
1778 }
1779 
1780 /* Miscellaneous encoding functions.  */
1781 
1782 /* Encode size[0], i.e. bit 22, for
1783      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1784 
1785 static void
1786 encode_asimd_fcvt (aarch64_inst *inst)
1787 {
1788   aarch64_insn value;
1789   aarch64_field field = {0, 0};
1790   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;
1791 
1792   switch (inst->opcode->op)
1793     {
1794     case OP_FCVTN:
1795     case OP_FCVTN2:
1796       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1797       qualifier = inst->operands[1].qualifier;
1798       break;
1799     case OP_FCVTL:
1800     case OP_FCVTL2:
1801       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
1802       qualifier = inst->operands[0].qualifier;
1803       break;
1804     default:
1805       return;
1806     }
1807   assert (qualifier == AARCH64_OPND_QLF_V_4S
1808 	  || qualifier == AARCH64_OPND_QLF_V_2D);
1809   value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1810   gen_sub_field (FLD_size, 0, 1, &field);
1811   insert_field_2 (&field, &inst->value, value, 0);
1812 }
1813 
1814 /* Encode size[0], i.e. bit 22, for
1815      e.g. FCVTXN <Vb><d>, <Va><n>.  */
1816 
1817 static void
1818 encode_asisd_fcvtxn (aarch64_inst *inst)
1819 {
1820   aarch64_insn val = 1;
1821   aarch64_field field = {0, 0};
1822   assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1823   gen_sub_field (FLD_size, 0, 1, &field);
1824   insert_field_2 (&field, &inst->value, val, 0);
1825 }
1826 
1827 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
1828 static void
1829 encode_fcvt (aarch64_inst *inst)
1830 {
1831   aarch64_insn val;
1832   const aarch64_field field = {15, 2};
1833 
1834   /* opc dstsize */
1835   switch (inst->operands[0].qualifier)
1836     {
1837     case AARCH64_OPND_QLF_S_S: val = 0; break;
1838     case AARCH64_OPND_QLF_S_D: val = 1; break;
1839     case AARCH64_OPND_QLF_S_H: val = 3; break;
1840     default: abort ();
1841     }
1842   insert_field_2 (&field, &inst->value, val, 0);
1843 
1844   return;
1845 }
1846 
1847 /* Return the index in qualifiers_list that INST is using.  Should only
1848    be called once the qualifiers are known to be valid.  */
1849 
1850 static int
1851 aarch64_get_variant (struct aarch64_inst *inst)
1852 {
1853   int i, nops, variant;
1854 
1855   nops = aarch64_num_of_operands (inst->opcode);
1856   for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1857     {
1858       for (i = 0; i < nops; ++i)
1859 	if (inst->opcode->qualifiers_list[variant][i]
1860 	    != inst->operands[i].qualifier)
1861 	  break;
1862       if (i == nops)
1863 	return variant;
1864     }
1865   abort ();
1866 }
1867 
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Each case fills in fields that no single operand inserter
   covers, typically by duplicating an already-encoded register field.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      /* Scalar FCVT: encode the 'opc' (destination size) field.  */
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      /* Vector narrowing/lengthening convert: encode size[0] (bit 22).  */
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      /* Scalar FCVTXN: encode size[0] (bit 22).  */
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode beyond the operand inserters.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1935 
1936 /* Encode the 'size' and 'Q' field for e.g. SHADD.  */
1937 static void
1938 encode_sizeq (aarch64_inst *inst)
1939 {
1940   aarch64_insn sizeq;
1941   enum aarch64_field_kind kind;
1942   int idx;
1943 
1944   /* Get the index of the operand whose information we are going to use
1945      to encode the size and Q fields.
1946      This is deduced from the possible valid qualifier lists.  */
1947   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1948   DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1949 	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1950   sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1951   /* Q */
1952   insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1953   /* size */
1954   if (inst->opcode->iclass == asisdlse
1955      || inst->opcode->iclass == asisdlsep
1956      || inst->opcode->iclass == asisdlso
1957      || inst->opcode->iclass == asisdlsop)
1958     kind = FLD_vldst_size;
1959   else
1960     kind = FLD_size;
1961   insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1962 }
1963 
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* sf (and, for F_N opcodes, N) is 1 when the selected operand is an
     X register or SP, i.e. for the 64-bit form.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* Likewise for the LSE size bit.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  /* RCpc3 load/store size, derived from the qualifier of operand 0.  */
  if (inst->opcode->flags & F_RCPC3_SIZE)
    {
      switch (inst->operands[0].qualifier)
	{
	case AARCH64_OPND_QLF_W: value = 2; break;
	case AARCH64_OPND_QLF_X: value = 3; break;
	case AARCH64_OPND_QLF_S_B: value = 0; break;
	case AARCH64_OPND_QLF_S_H: value = 1; break;
	case AARCH64_OPND_QLF_S_S: value = 2; break;
	case AARCH64_OPND_QLF_S_D: value = 3; break;
	case AARCH64_OPND_QLF_S_Q: value = 0; break;
	default: return;
	}
      insert_field (FLD_rcpc3_size, &inst->value, value, 0);
    }

  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* FP 'type' field: 0 = single, 1 = double, 3 = half.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: return;
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* Scalar size, taken from the standard value of a scalar qualifier.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement <T>, encoded as Q plus a one-hot sub-field of
     imm5; see the table below.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }

  /* The size field is derived from the qualifier of the scalar operand
     (idx+2); it must agree with the vector operand's qualifier, which is
     asserted below.  */
  if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
    {
      enum aarch64_opnd_qualifier qualifier[2];
      aarch64_insn value1 = 0;
      idx = 0;
      qualifier[0] = inst->operands[idx].qualifier;
      qualifier[1] = inst->operands[idx+2].qualifier;
      value = aarch64_get_qualifier_standard_value (qualifier[0]);
      value1 = aarch64_get_qualifier_standard_value (qualifier[1]);
      assert ((value >> 1) == value1);
      insert_field (FLD_size, &inst->value, value1, inst->opcode->mask);
    }

  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
	 The low bit of 'opc' is the inverse of the destination
	 register's standard qualifier value.  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
2119 
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
    case sme_psel:
      /* The variant is encoded as part of the immediate.  */
      break;

    case sme_size_12_bhs:
      insert_field (FLD_SME_size_12, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22:
      insert_field (FLD_SME_size_22, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22_hsd:
      /* The field value is biased by 1 for this group.  */
      insert_field (FLD_SME_size_22, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_size_12_hs:
      /* The field value is biased by 1 for this group.  */
      insert_field (FLD_SME_size_12, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_sz_23:
      insert_field (FLD_SME_sz_23, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sve_cpy:
      /* The variant is split across the M and size fields.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sme_shift:
    case sve_index:
    case sve_index1:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
    case sme2_mov:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sme_misc:
    case sme2_movaz:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      /* The variant is split across the M and size fields.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* MOD 3 For `OP_SVE_Vv_HSD`.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      /* The field value is biased by 1 for this group.  */
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* The variant is encoded as a one-hot value across tszl and sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* The encoded sizes are 1 and 3; field value 2 is skipped.  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	  variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
2242 
2243 /* Converters converting an alias opcode instruction to its real form.  */
2244 
/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the immediate up one slot and duplicate the source register,
     copying right-to-left so operand 2 is read before it is
     overwritten.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
2254 
2255 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2256      is equivalent to:
2257    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2258 static void
2259 convert_xtl_to_shll (aarch64_inst *inst)
2260 {
2261   inst->operands[2].qualifier = inst->operands[1].qualifier;
2262   inst->operands[2].imm.value = 0;
2263 }
2264 
2265 /* Convert
2266      LSR <Xd>, <Xn>, #<shift>
2267    to
2268      UBFM <Xd>, <Xn>, #<shift>, #63.  */
2269 static void
2270 convert_sr_to_bfm (aarch64_inst *inst)
2271 {
2272   inst->operands[3].imm.value =
2273     inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2274 }
2275 
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the source register into the second source slot.  */
  copy_operand_info (inst, 2, 1);
}
2285 
2286 /* When <imms> >= <immr>, the instruction written:
2287      SBFX <Xd>, <Xn>, #<lsb>, #<width>
2288    is equivalent to:
2289      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
2290 
2291 static void
2292 convert_bfx_to_bfm (aarch64_inst *inst)
2293 {
2294   int64_t lsb, width;
2295 
2296   /* Convert the operand.  */
2297   lsb = inst->operands[2].imm.value;
2298   width = inst->operands[3].imm.value;
2299   inst->operands[2].imm.value = lsb;
2300   inst->operands[3].imm.value = lsb + width - 1;
2301 }
2302 
2303 /* When <imms> < <immr>, the instruction written:
2304      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2305    is equivalent to:
2306      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
2307 
2308 static void
2309 convert_bfi_to_bfm (aarch64_inst *inst)
2310 {
2311   int64_t lsb, width;
2312 
2313   /* Convert the operand.  */
2314   lsb = inst->operands[2].imm.value;
2315   width = inst->operands[3].imm.value;
2316   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2317     {
2318       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2319       inst->operands[3].imm.value = width - 1;
2320     }
2321   else
2322     {
2323       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2324       inst->operands[3].imm.value = width - 1;
2325     }
2326 }
2327 
2328 /* The instruction written:
2329      BFC <Xd>, #<lsb>, #<width>
2330    is equivalent to:
2331      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2332 
2333 static void
2334 convert_bfc_to_bfm (aarch64_inst *inst)
2335 {
2336   int64_t lsb, width;
2337 
2338   /* Insert XZR.  */
2339   copy_operand_info (inst, 3, 2);
2340   copy_operand_info (inst, 2, 1);
2341   copy_operand_info (inst, 1, 0);
2342   inst->operands[1].reg.regno = 0x1f;
2343 
2344   /* Convert the immediate operand.  */
2345   lsb = inst->operands[2].imm.value;
2346   width = inst->operands[3].imm.value;
2347   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2348     {
2349       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2350       inst->operands[3].imm.value = width - 1;
2351     }
2352   else
2353     {
2354       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2355       inst->operands[3].imm.value = width - 1;
2356     }
2357 }
2358 
2359 /* The instruction written:
2360      LSL <Xd>, <Xn>, #<shift>
2361    is equivalent to:
2362      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
2363 
2364 static void
2365 convert_lsl_to_ubfm (aarch64_inst *inst)
2366 {
2367   int64_t shift = inst->operands[2].imm.value;
2368 
2369   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2370     {
2371       inst->operands[2].imm.value = (32 - shift) & 0x1f;
2372       inst->operands[3].imm.value = 31 - shift;
2373     }
2374   else
2375     {
2376       inst->operands[2].imm.value = (64 - shift) & 0x3f;
2377       inst->operands[3].imm.value = 63 - shift;
2378     }
2379 }
2380 
/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Shift the condition into slot 3 and duplicate the source register,
     copying right-to-left so nothing is clobbered before it is read;
     then invert the condition in place.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
2392 
/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3 and synthesize WZR (register 31) for
     both source registers, copying right-to-left to avoid clobbering;
     then invert the condition in place.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
2407 
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t)0;

  /* MOVZ (OP_MOV_IMM_WIDE) takes the immediate as written; MOVN
     (OP_MOV_IMM_WIDEN) stores the bitwise inverse of it.  */
  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  /* Split VALUE into a 16-bit chunk plus the LSL amount that positions
     it.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
2441 
/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Shift the immediate into slot 2 and synthesize WZR (register 31)
     as the first source operand.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
2453 
2454 /* Some alias opcodes are assembled by being converted to their real-form.  */
2455 
2456 static void
2457 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2458 {
2459   const aarch64_opcode *alias = inst->opcode;
2460 
2461   if ((alias->flags & F_CONV) == 0)
2462     goto convert_to_real_return;
2463 
2464   switch (alias->op)
2465     {
2466     case OP_ASR_IMM:
2467     case OP_LSR_IMM:
2468       convert_sr_to_bfm (inst);
2469       break;
2470     case OP_LSL_IMM:
2471       convert_lsl_to_ubfm (inst);
2472       break;
2473     case OP_CINC:
2474     case OP_CINV:
2475     case OP_CNEG:
2476       convert_to_csel (inst);
2477       break;
2478     case OP_CSET:
2479     case OP_CSETM:
2480       convert_cset_to_csinc (inst);
2481       break;
2482     case OP_UBFX:
2483     case OP_BFXIL:
2484     case OP_SBFX:
2485       convert_bfx_to_bfm (inst);
2486       break;
2487     case OP_SBFIZ:
2488     case OP_BFI:
2489     case OP_UBFIZ:
2490       convert_bfi_to_bfm (inst);
2491       break;
2492     case OP_BFC:
2493       convert_bfc_to_bfm (inst);
2494       break;
2495     case OP_MOV_V:
2496       convert_mov_to_orr (inst);
2497       break;
2498     case OP_MOV_IMM_WIDE:
2499     case OP_MOV_IMM_WIDEN:
2500       convert_mov_to_movewide (inst);
2501       break;
2502     case OP_MOV_IMM_LOG:
2503       convert_mov_to_movebitmask (inst);
2504       break;
2505     case OP_ROR_IMM:
2506       convert_ror_to_extr (inst);
2507       break;
2508     case OP_SXTL:
2509     case OP_SXTL2:
2510     case OP_UXTL:
2511     case OP_UXTL2:
2512       convert_xtl_to_shll (inst);
2513       break;
2514     default:
2515       break;
2516     }
2517 
2518  convert_to_real_return:
2519   aarch64_replace_opcode (inst, real);
2520 }
2521 
2522 /* Encode *INST_ORI of the opcode code OPCODE.
2523    Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2524    matched operand qualifier sequence in *QLF_SEQ.  */
2525 
2526 bool
2527 aarch64_opcode_encode (const aarch64_opcode *opcode,
2528 		       const aarch64_inst *inst_ori, aarch64_insn *code,
2529 		       aarch64_opnd_qualifier_t *qlf_seq,
2530 		       aarch64_operand_error *mismatch_detail,
2531 		       aarch64_instr_sequence* insn_sequence)
2532 {
2533   int i;
2534   const aarch64_opcode *aliased;
2535   aarch64_inst copy, *inst;
2536 
2537   DEBUG_TRACE ("enter with %s", opcode->name);
2538 
2539   /* Create a copy of *INST_ORI, so that we can do any change we want.  */
2540   copy = *inst_ori;
2541   inst = &copy;
2542 
2543   assert (inst->opcode == NULL || inst->opcode == opcode);
2544   if (inst->opcode == NULL)
2545     inst->opcode = opcode;
2546 
2547   /* Constrain the operands.
2548      After passing this, the encoding is guaranteed to succeed.  */
2549   if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2550     {
2551       DEBUG_TRACE ("FAIL since operand constraint not met");
2552       return 0;
2553     }
2554 
2555   /* Get the base value.
2556      Note: this has to be before the aliasing handling below in order to
2557      get the base value from the alias opcode before we move on to the
2558      aliased opcode for encoding.  */
2559   inst->value = opcode->opcode;
2560 
2561   /* No need to do anything else if the opcode does not have any operand.  */
2562   if (aarch64_num_of_operands (opcode) == 0)
2563     goto encoding_exit;
2564 
2565   /* Assign operand indexes and check types.  Also put the matched
2566      operand qualifiers in *QLF_SEQ to return.  */
2567   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2568     {
2569       assert (opcode->operands[i] == inst->operands[i].type);
2570       inst->operands[i].idx = i;
2571       if (qlf_seq != NULL)
2572 	*qlf_seq = inst->operands[i].qualifier;
2573     }
2574 
2575   aliased = aarch64_find_real_opcode (opcode);
2576   /* If the opcode is an alias and it does not ask for direct encoding by
2577      itself, the instruction will be transformed to the form of real opcode
2578      and the encoding will be carried out using the rules for the aliased
2579      opcode.  */
2580   if (aliased != NULL && (opcode->flags & F_CONV))
2581     {
2582       DEBUG_TRACE ("real opcode '%s' has been found for the alias  %s",
2583 		   aliased->name, opcode->name);
2584       /* Convert the operands to the form of the real opcode.  */
2585       convert_to_real (inst, aliased);
2586       opcode = aliased;
2587     }
2588 
2589   aarch64_opnd_info *info = inst->operands;
2590 
2591   /* Call the inserter of each operand.  */
2592   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2593     {
2594       const aarch64_operand *opnd;
2595       enum aarch64_opnd type = opcode->operands[i];
2596       if (type == AARCH64_OPND_NIL)
2597 	break;
2598       if (info->skip)
2599 	{
2600 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2601 	  continue;
2602 	}
2603       opnd = &aarch64_operands[type];
2604       if (operand_has_inserter (opnd)
2605 	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2606 				      mismatch_detail))
2607 	    return false;
2608     }
2609 
2610   /* Call opcode encoders indicated by flags.  */
2611   if (opcode_has_special_coder (opcode))
2612     do_special_encoding (inst);
2613 
2614   /* Possibly use the instruction class to encode the chosen qualifier
2615      variant.  */
2616   aarch64_encode_variant_using_iclass (inst);
2617 
2618   /* Run a verifier if the instruction has one set.  */
2619   if (opcode->verifier)
2620     {
2621       enum err_type result = opcode->verifier (inst, *code, 0, true,
2622 					       mismatch_detail, insn_sequence);
2623       switch (result)
2624 	{
2625 	case ERR_UND:
2626 	case ERR_UNP:
2627 	case ERR_NYI:
2628 	  return false;
2629 	default:
2630 	  break;
2631 	}
2632     }
2633 
2634   /* Always run constrain verifiers, this is needed because constrains need to
2635      maintain a global state.  Regardless if the instruction has the flag set
2636      or not.  */
2637   enum err_type result = verify_constraints (inst, *code, 0, true,
2638 					     mismatch_detail, insn_sequence);
2639   switch (result)
2640     {
2641     case ERR_UND:
2642     case ERR_UNP:
2643     case ERR_NYI:
2644       return false;
2645     default:
2646       break;
2647     }
2648 
2649 
2650  encoding_exit:
2651   DEBUG_TRACE ("exit with %s", opcode->name);
2652 
2653   *code = inst->value;
2654 
2655   return true;
2656 }
2657