xref: /netbsd-src/external/gpl3/gcc/dist/gcc/config/aarch64/aarch64-protos.h (revision 0a3071956a3a9fdebdbf7f338cf2d439b45fc728)
1 /* Machine description for AArch64 architecture.
2    Copyright (C) 2009-2022 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    GCC is distributed in the hope that it will be useful, but
13    WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 
22 #ifndef GCC_AARCH64_PROTOS_H
23 #define GCC_AARCH64_PROTOS_H
24 
25 #include "input.h"
26 
27 /* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
28    high and lo relocs that calculate the base address using a PC
29    relative reloc.
30    So to get the address of foo, we generate
31    adrp x0, foo
32    add  x0, x0, :lo12:foo
33 
34    To load or store something to foo, we could use the corresponding
35    load store variants that generate an
36    ldr x0, [x0,:lo12:foo]
37    or
38    str x1, [x0, :lo12:foo]
39 
40    This corresponds to the small code model of the compiler.
41 
42    SYMBOL_SMALL_GOT_4G: Similar to the one above but this
43    gives us the GOT entry of the symbol being referred to :
44    Thus calculating the GOT entry for foo is done using the
45    following sequence of instructions.  The ADRP instruction
46    gets us to the page containing the GOT entry of the symbol
47    and the got_lo12 gets us the actual offset in it, together
48    the base and offset, we can address 4G size GOT table.
49 
50    adrp  x0, :got:foo
51    ldr   x0, [x0, :gotoff_lo12:foo]
52 
53    This corresponds to the small PIC model of the compiler.
54 
55    SYMBOL_SMALL_GOT_28K: Similar to SYMBOL_SMALL_GOT_4G, but used for symbol
56    restricted within 28K GOT table size.
57 
58    ldr reg, [gp, #:gotpage_lo15:sym]
59 
60    This corresponds to -fpic model for small memory model of the compiler.
61 
62    SYMBOL_SMALL_TLSGD
63    SYMBOL_SMALL_TLSDESC
64    SYMBOL_SMALL_TLSIE
65    SYMBOL_TINY_TLSIE
66    SYMBOL_TLSLE12
67    SYMBOL_TLSLE24
68    SYMBOL_TLSLE32
69    SYMBOL_TLSLE48
70    Each of these represents a thread-local symbol, and corresponds to the
71    thread local storage relocation operator for the symbol being referred to.
72 
73    SYMBOL_TINY_ABSOLUTE
74 
75    Generate symbol accesses as a PC relative address using a single
76    instruction.  To compute the address of symbol foo, we generate:
77 
78    ADR x0, foo
79 
80    SYMBOL_TINY_GOT
81 
82    Generate symbol accesses via the GOT using a single PC relative
83    instruction.  To compute the address of symbol foo, we generate:
84 
85    ldr t0, :got:foo
86 
87    The value of foo can subsequently read using:
88 
89    ldrb    t0, [t0]
90 
91    SYMBOL_FORCE_TO_MEM : Global variables are addressed using
92    constant pool.  All variable addresses are spilled into constant
93    pools.  The constant pools themselves are addressed using PC
94    relative accesses.  This only works for the large code model.
95  */
/* The classification of a symbol reference; see the large comment above
   for the code sequence generated for each kind.  */
enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT_28K,
  SYMBOL_SMALL_GOT_4G,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_TLSIE,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_TINY_TLSIE,
  SYMBOL_TLSLE12,
  SYMBOL_TLSLE24,
  SYMBOL_TLSLE32,
  SYMBOL_TLSLE48,
  SYMBOL_FORCE_TO_MEM
};
113 
114 /* Classifies the type of an address query.
115 
116    ADDR_QUERY_M
117       Query what is valid for an "m" constraint and a memory_operand
118       (the rules are the same for both).
119 
120    ADDR_QUERY_LDP_STP
121       Query what is valid for a load/store pair.
122 
123    ADDR_QUERY_LDP_STP_N
124       Query what is valid for a load/store pair, but narrow the incoming mode
125       for address checking.  This is used for the store_pair_lanes patterns.
126 
127    ADDR_QUERY_ANY
128       Query what is valid for at least one memory constraint, which may
129       allow things that "m" doesn't.  For example, the SVE LDR and STR
130       addressing modes allow a wider range of immediate offsets than "m"
131       does.  */
/* See the comment above for what each query type means.  */
enum aarch64_addr_query_type {
  ADDR_QUERY_M,		/* "m" constraint / memory_operand rules.  */
  ADDR_QUERY_LDP_STP,	/* Load/store pair.  */
  ADDR_QUERY_LDP_STP_N,	/* Load/store pair with a narrowed mode.  */
  ADDR_QUERY_ANY	/* Valid for at least one memory constraint.  */
};
138 
139 /* Enumerates values that can be arbitrarily mixed into a calculation
140    in order to make the result of the calculation unique to its use case.
141 
142    AARCH64_SALT_SSP_SET
143    AARCH64_SALT_SSP_TEST
144       Used when calculating the address of the stack protection canary value.
145       There is a separate value for setting and testing the canary, meaning
146       that these two operations produce unique addresses: they are different
147       from each other, and from all other address calculations.
148 
149       The main purpose of this is to prevent the SET address being spilled
150       to the stack and reloaded for the TEST, since that would give an
151       attacker the opportunity to change the address of the expected
152       canary value.  */
/* See the comment above: separate salts keep the SET and TEST address
   calculations distinct.  */
enum aarch64_salt_type {
  AARCH64_SALT_SSP_SET,		/* Setting the stack-protector canary.  */
  AARCH64_SALT_SSP_TEST		/* Testing the stack-protector canary.  */
};
157 
158 /* A set of tuning parameters contains references to size and time
159    cost models and vectors for address cost calculations, register
160    move costs and memory move costs.  */
161 
162 /* Scaled addressing modes can vary cost depending on the mode of the
163    value to be loaded/stored.  QImode values cannot use scaled
164    addressing modes.  */
165 
/* Per-mode costs for scaled addressing (see the comment above; QImode
   cannot use scaled addressing and so has no entry).  */
struct scale_addr_mode_cost
{
  const int hi;		/* Cost for HImode accesses.  */
  const int si;		/* Cost for SImode accesses.  */
  const int di;		/* Cost for DImode accesses.  */
  const int ti;		/* Cost for TImode accesses.  */
};
173 
/* Additional cost for addresses.  */
struct cpu_addrcost_table
{
  /* Extra costs for scaled addressing modes, broken down by access mode.  */
  const struct scale_addr_mode_cost addr_scale_costs;
  /* Costs of pre- and post-modify addressing.  */
  const int pre_modify;
  const int post_modify;
  /* Post-modify costs specific to LD3/ST3 and LD4/ST4 sequences.  */
  const int post_modify_ld3_st3;
  const int post_modify_ld4_st4;
  /* Cost of a register offset, plain and sign/zero-extended forms.  */
  const int register_offset;
  const int register_sextend;
  const int register_zextend;
  /* Cost of an immediate offset.  */
  const int imm_offset;
};
187 
/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;	/* General register to general register.  */
  const int GP2FP;	/* General register to FP/SIMD register.  */
  const int FP2GP;	/* FP/SIMD register to general register.  */
  const int FP2FP;	/* FP/SIMD register to FP/SIMD register.  */
};
196 
/* Costs of SIMD statements, common to Advanced SIMD and SVE.  */
struct simd_vec_cost
{
  /* Cost of any integer vector operation, excluding the ones handled
     specially below.  */
  const int int_stmt_cost;

  /* Cost of any fp vector operation, excluding the ones handled
     specially below.  */
  const int fp_stmt_cost;

  /* Per-vector cost of permuting vectors after an LD2, LD3 or LD4,
     as well as the per-vector cost of permuting vectors before
     an ST2, ST3 or ST4.  */
  const int ld2_st2_permute_cost;
  const int ld3_st3_permute_cost;
  const int ld4_st4_permute_cost;

  /* Cost of a permute operation.  */
  const int permute_cost;

  /* Cost of reductions for various vector types: iN is for N-bit
     integer elements and fN is for N-bit floating-point elements.
     We need to single out the element type because it affects the
     depth of the reduction.  */
  const int reduc_i8_cost;
  const int reduc_i16_cost;
  const int reduc_i32_cost;
  const int reduc_i64_cost;
  const int reduc_f16_cost;
  const int reduc_f32_cost;
  const int reduc_f64_cost;

  /* Additional cost of storing a single vector element, on top of the
     normal cost of a scalar store.  */
  const int store_elt_extra_cost;

  /* Cost of a vector-to-scalar operation.  */
  const int vec_to_scalar_cost;

  /* Cost of a scalar-to-vector operation.  */
  const int scalar_to_vec_cost;

  /* Cost of an aligned vector load.  */
  const int align_load_cost;

  /* Cost of an unaligned vector load.  */
  const int unalign_load_cost;

  /* Cost of an unaligned vector store.  */
  const int unalign_store_cost;

  /* Cost of a vector store.  */
  const int store_cost;
};

/* Advanced SIMD needs no extra fields beyond the common ones.  */
typedef struct simd_vec_cost advsimd_vec_cost;
253 
254 /* SVE-specific extensions to the information provided by simd_vec_cost.  */
255 struct sve_vec_cost : simd_vec_cost
256 {
sve_vec_costsve_vec_cost257   constexpr sve_vec_cost (const simd_vec_cost &base,
258 			  unsigned int clast_cost,
259 			  unsigned int fadda_f16_cost,
260 			  unsigned int fadda_f32_cost,
261 			  unsigned int fadda_f64_cost,
262 			  unsigned int gather_load_x32_cost,
263 			  unsigned int gather_load_x64_cost,
264 			  unsigned int scatter_store_elt_cost)
265     : simd_vec_cost (base),
266       clast_cost (clast_cost),
267       fadda_f16_cost (fadda_f16_cost),
268       fadda_f32_cost (fadda_f32_cost),
269       fadda_f64_cost (fadda_f64_cost),
270       gather_load_x32_cost (gather_load_x32_cost),
271       gather_load_x64_cost (gather_load_x64_cost),
272       scatter_store_elt_cost (scatter_store_elt_cost)
273   {}
274 
275   /* The cost of a vector-to-scalar CLASTA or CLASTB instruction,
276      with the scalar being stored in FP registers.  This cost is
277      assumed to be a cycle latency.  */
278   const int clast_cost;
279 
280   /* The costs of FADDA for the three data types that it supports.
281      These costs are assumed to be cycle latencies.  */
282   const int fadda_f16_cost;
283   const int fadda_f32_cost;
284   const int fadda_f64_cost;
285 
286   /* The cost of a gather load instruction.  The x32 value is for loads
287      of 32-bit elements and the x64 value is for loads of 64-bit elements.  */
288   const int gather_load_x32_cost;
289   const int gather_load_x64_cost;
290 
291   /* The per-element cost of a scatter store.  */
292   const int scatter_store_elt_cost;
293 };
294 
295 /* Base information about how the CPU issues code, containing
296    information that is relevant to scalar, Advanced SIMD and SVE
297    operations.
298 
299    The structure uses the general term "operation" to refer to
300    whichever subdivision of an instruction makes sense for the CPU.
301    These operations would typically be micro operations or macro
302    operations.
303 
304    Note that this structure and the ones derived from it are only
305    as general as they need to be for the CPUs that currently use them.
306    They will probably need to be extended or refined as more CPUs are
307    added.  */
/* Issue information common to scalar, Advanced SIMD and SVE code; see the
   comment above for what "operation" means here.  */
struct aarch64_base_vec_issue_info
{
  /* How many loads and stores can be issued per cycle.  */
  const unsigned int loads_stores_per_cycle;

  /* How many stores can be issued per cycle.  */
  const unsigned int stores_per_cycle;

  /* How many integer or FP/SIMD operations can be issued per cycle.

     Currently we don't try to distinguish the two.  For vector code,
     we only really track FP/SIMD operations during vector costing;
     we don't for example try to cost arithmetic operations like
     address calculations, which are only decided later during ivopts.

     For scalar code, we effectively assume that code operates entirely
     on integers or entirely on floating-point values.  Again, we don't
     try to take address calculations into account.

     This is not very precise, but it's only meant to be a heuristic.
     We could certainly try to do better in future if there's an example
     of something that would benefit.  */
  const unsigned int general_ops_per_cycle;

  /* How many FP/SIMD operations to count for a floating-point or
     vector load operation.

     When constructing an Advanced SIMD vector from elements that have
     been loaded from memory, these values apply to each individual load.
     When using an SVE gather load, the values apply to each element of
     the gather.  */
  const unsigned int fp_simd_load_general_ops;

  /* How many FP/SIMD operations to count for a floating-point or
     vector store operation.

     When storing individual elements of an Advanced SIMD vector out to
     memory, these values apply to each individual store.  When using an
     SVE scatter store, these values apply to each element of the scatter.  */
  const unsigned int fp_simd_store_general_ops;
};

/* Scalar issue information needs nothing beyond the base structure.  */
using aarch64_scalar_vec_issue_info = aarch64_base_vec_issue_info;
351 
352 /* Base information about the issue stage for vector operations.
353    This structure contains information that is relevant to both
354    Advanced SIMD and SVE.  */
355 struct aarch64_simd_vec_issue_info : aarch64_base_vec_issue_info
356 {
aarch64_simd_vec_issue_infoaarch64_simd_vec_issue_info357   constexpr aarch64_simd_vec_issue_info (aarch64_base_vec_issue_info base,
358 					 unsigned int ld2_st2_general_ops,
359 					 unsigned int ld3_st3_general_ops,
360 					 unsigned int ld4_st4_general_ops)
361     : aarch64_base_vec_issue_info (base),
362       ld2_st2_general_ops (ld2_st2_general_ops),
363       ld3_st3_general_ops (ld3_st3_general_ops),
364       ld4_st4_general_ops (ld4_st4_general_ops)
365   {}
366 
367   /* How many FP/SIMD operations to count for each vector loaded or
368      stored by an LD[234] or ST[234] operation, in addition to the
369      base costs given in the parent class.  For example, the full
370      number of operations for an LD3 would be:
371 
372        load ops:    3
373        general ops: 3 * (fp_simd_load_general_ops + ld3_st3_general_ops).  */
374   const unsigned int ld2_st2_general_ops;
375   const unsigned int ld3_st3_general_ops;
376   const unsigned int ld4_st4_general_ops;
377 };
378 
379 using aarch64_advsimd_vec_issue_info = aarch64_simd_vec_issue_info;
380 
381 /* Information about the issue stage for SVE.  The main thing this adds
382    is a concept of "predicate operations".  */
383 struct aarch64_sve_vec_issue_info : aarch64_simd_vec_issue_info
384 {
aarch64_sve_vec_issue_infoaarch64_sve_vec_issue_info385   constexpr aarch64_sve_vec_issue_info
386     (aarch64_simd_vec_issue_info base,
387      unsigned int pred_ops_per_cycle,
388      unsigned int while_pred_ops,
389      unsigned int int_cmp_pred_ops,
390      unsigned int fp_cmp_pred_ops,
391      unsigned int gather_scatter_pair_general_ops,
392      unsigned int gather_scatter_pair_pred_ops)
393     : aarch64_simd_vec_issue_info (base),
394       pred_ops_per_cycle (pred_ops_per_cycle),
395       while_pred_ops (while_pred_ops),
396       int_cmp_pred_ops (int_cmp_pred_ops),
397       fp_cmp_pred_ops (fp_cmp_pred_ops),
398       gather_scatter_pair_general_ops (gather_scatter_pair_general_ops),
399       gather_scatter_pair_pred_ops (gather_scatter_pair_pred_ops)
400   {}
401 
402   /* How many predicate operations can be issued per cycle.  */
403   const unsigned int pred_ops_per_cycle;
404 
405   /* How many predicate operations are generated by a WHILExx
406      instruction.  */
407   const unsigned int while_pred_ops;
408 
409   /* How many predicate operations are generated by an integer
410      comparison instruction.  */
411   const unsigned int int_cmp_pred_ops;
412 
413   /* How many predicate operations are generated by a floating-point
414      comparison instruction.  */
415   const unsigned int fp_cmp_pred_ops;
416 
417   /* How many general and predicate operations are generated by each pair
418      of elements in a gather load or scatter store.  These values apply
419      on top of the per-element counts recorded in fp_simd_load_general_ops
420      and fp_simd_store_general_ops.
421 
422      The reason for using pairs is that that is the largest possible
423      granule size for 128-bit SVE, which can load and store 2 64-bit
424      elements or 4 32-bit elements.  */
425   const unsigned int gather_scatter_pair_general_ops;
426   const unsigned int gather_scatter_pair_pred_ops;
427 };
428 
/* Information related to instruction issue for a particular CPU.  */
struct aarch64_vec_issue_info
{
  /* Issue information for scalar code.  */
  const aarch64_base_vec_issue_info *const scalar;
  /* Issue information for Advanced SIMD code.  */
  const aarch64_simd_vec_issue_info *const advsimd;
  /* Issue information for SVE code.  */
  const aarch64_sve_vec_issue_info *const sve;
};
436 
/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  /* Cost of any integer scalar operation, excluding load and store.  */
  const int scalar_int_stmt_cost;

  /* Cost of any fp scalar operation, excluding load and store.  */
  const int scalar_fp_stmt_cost;

  /* Cost of a scalar load.  */
  const int scalar_load_cost;

  /* Cost of a scalar store.  */
  const int scalar_store_cost;

  /* Cost of a taken branch.  */
  const int cond_taken_branch_cost;

  /* Cost of a not-taken branch.  */
  const int cond_not_taken_branch_cost;

  /* Costs of Advanced SIMD operations.  */
  const advsimd_vec_cost *advsimd;

  /* Costs of SVE operations, or null if SVE is not implemented.  */
  const sve_vec_cost *sve;

  /* Issue information, or null if none is provided.  */
  const aarch64_vec_issue_info *const issue_info;
};
467 
/* Branch costs.  */
struct cpu_branch_cost
{
  const int predictable;    /* Predictable branch or optimizing for size.  */
  const int unpredictable;  /* Unpredictable branch or optimizing for speed.  */
};
474 
/* Control approximate alternatives to certain FP operators.
   AARCH64_APPROX_MODE maps a floating-point MODE to a one-bit mask:
   scalar float modes occupy the low bits, vector float modes the bits
   directly above them, and any other mode yields 0.  NONE/ALL are the
   empty and full masks respectively.  */
#define AARCH64_APPROX_MODE(MODE) \
  ((MIN_MODE_FLOAT <= (MODE) && (MODE) <= MAX_MODE_FLOAT) \
   ? ((uint64_t) 1 << ((MODE) - MIN_MODE_FLOAT)) \
   : (MIN_MODE_VECTOR_FLOAT <= (MODE) && (MODE) <= MAX_MODE_VECTOR_FLOAT) \
     ? ((uint64_t) 1 << ((MODE) - MIN_MODE_VECTOR_FLOAT \
			 + MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1)) \
     : (0))
#define AARCH64_APPROX_NONE ((uint64_t) 0)
#define AARCH64_APPROX_ALL (~(uint64_t) 0)
485 
/* Allowed modes for approximations, as AARCH64_APPROX_* bitmasks.  */
struct cpu_approx_modes
{
  const uint64_t division;	/* Division.  */
  const uint64_t sqrt;		/* Square root.  */
  const uint64_t recip_sqrt;	/* Reciprocal square root.  */
};
493 
/* Cache prefetch settings for prefetch-loop-arrays.
   NOTE(review): units of num_slots and the cache-size fields are not
   stated here — confirm against the users in the back end.  */
struct cpu_prefetch_tune
{
  const int num_slots;
  const int l1_cache_size;
  const int l1_cache_line_size;
  const int l2_cache_size;
  /* Whether software prefetch hints should be issued for non-constant
     strides.  */
  const bool prefetch_dynamic_strides;
  /* The minimum constant stride beyond which we should use prefetch
     hints for.  */
  const int minimum_stride;
  /* Optimization level at which this tuning takes effect by default.  */
  const int default_opt_level;
};
509 
/* Model the costs for loads/stores for the register allocators so that it can
   do more accurate spill heuristics.  */
struct cpu_memmov_cost
{
  int load_int;		/* Load of a general (integer) register.  */
  int store_int;	/* Store of a general (integer) register.  */
  int load_fp;		/* Load of an FP/SIMD register.  */
  int store_fp;		/* Store of an FP/SIMD register.  */
  int load_pred;	/* Load of an SVE predicate register.  */
  int store_pred;	/* Store of an SVE predicate register.  */
};
521 
/* The complete set of tuning parameters for a CPU; see the individual
   cost structures above for the meaning of each table.  */
struct tune_params
{
  /* Per-CPU cost tables.  */
  const struct cpu_cost_table *insn_extra_cost;
  const struct cpu_addrcost_table *addr_cost;
  const struct cpu_regmove_cost *regmove_cost;
  const struct cpu_vector_cost *vec_costs;
  const struct cpu_branch_cost *branch_costs;
  const struct cpu_approx_modes *approx_modes;
  /* A bitmask of the possible SVE register widths in bits,
     or SVE_NOT_IMPLEMENTED if not applicable.  Only used for tuning
     decisions, does not disable VLA vectorization.  */
  unsigned int sve_width;
  /* Structure used by reload to cost spills.  */
  struct cpu_memmov_cost memmov_cost;
  /* Issue rate of the processor.  */
  int issue_rate;
  /* Bitmask of AARCH64_FUSE_* fusible operations.  */
  unsigned int fusible_ops;
  /* Alignment strings for functions, jump targets and loops.  */
  const char *function_align;
  const char *jump_align;
  const char *loop_align;
  /* Reassociation widths for integer, FP and vector operations.  */
  int int_reassoc_width;
  int fp_reassoc_width;
  int vec_reassoc_width;
  int min_div_recip_mul_sf;
  int min_div_recip_mul_df;
  /* Value for aarch64_case_values_threshold; or 0 for the default.  */
  unsigned int max_case_values;
/* An enum specifying how to take into account CPU autoprefetch capabilities
   during instruction scheduling:
   - AUTOPREFETCHER_OFF: Do not take autoprefetch capabilities into account.
   - AUTOPREFETCHER_WEAK: Attempt to sort sequences of loads/store in order of
   offsets but allow the pipeline hazard recognizer to alter that order to
   maximize multi-issue opportunities.
   - AUTOPREFETCHER_STRONG: Attempt to sort sequences of loads/store in order of
   offsets and prefer this even if it restricts multi-issue opportunities.  */

  enum aarch64_autoprefetch_model
  {
    AUTOPREFETCHER_OFF,
    AUTOPREFETCHER_WEAK,
    AUTOPREFETCHER_STRONG
  } autoprefetcher_model;

  /* Bitmask of AARCH64_EXTRA_TUNE_* flags.  */
  unsigned int extra_tuning_flags;

  /* Place prefetch struct pointer at the end to enable type checking
     errors when tune_params misses elements (e.g., from erroneous merges).  */
  const struct cpu_prefetch_tune *prefetch;
};
570 
571 /* Classifies an address.
572 
573    ADDRESS_REG_IMM
574        A simple base register plus immediate offset.
575 
576    ADDRESS_REG_WB
577        A base register indexed by immediate offset with writeback.
578 
579    ADDRESS_REG_REG
580        A base register indexed by (optionally scaled) register.
581 
582    ADDRESS_REG_UXTW
583        A base register indexed by (optionally scaled) zero-extended register.
584 
585    ADDRESS_REG_SXTW
586        A base register indexed by (optionally scaled) sign-extended register.
587 
588    ADDRESS_LO_SUM
589        A LO_SUM rtx with a base register and "LO12" symbol relocation.
590 
591    ADDRESS_SYMBOLIC:
592        A constant symbolic address, in pc-relative literal pool.  */
593 
/* See the comment above for what each classification means.  */
enum aarch64_address_type {
  ADDRESS_REG_IMM,	/* Base register plus immediate offset.  */
  ADDRESS_REG_WB,	/* Base register with writeback.  */
  ADDRESS_REG_REG,	/* Base register plus (scaled) register.  */
  ADDRESS_REG_UXTW,	/* Base plus (scaled) zero-extended register.  */
  ADDRESS_REG_SXTW,	/* Base plus (scaled) sign-extended register.  */
  ADDRESS_LO_SUM,	/* LO_SUM with "LO12" symbol relocation.  */
  ADDRESS_SYMBOLIC	/* Constant symbolic address.  */
};
603 
/* Address information, filled in when classifying an address.  */
struct aarch64_address_info {
  /* The classification of the address.  */
  enum aarch64_address_type type;
  /* The base register, if any.  */
  rtx base;
  /* The offset, if any.  */
  rtx offset;
  /* The constant offset, if any.  */
  poly_int64 const_offset;
  /* The shift (scale) applied to an index register.  */
  int shift;
  /* The symbol classification, for symbolic addresses.  */
  enum aarch64_symbol_type symbol_type;
};
613 
/* First expansion of aarch64-fusion-pairs.def: give each fusion pair
   a sequential index.  (The .def file #undefs the macro after use.)  */
#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name##_index,
/* Supported fusion operations, as indexes.  */
enum aarch64_fusion_pairs_index
{
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_index_END
};

/* Second expansion: turn each index into a single-bit mask.  */
#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name = (1u << AARCH64_FUSE_##name##_index),
/* Supported fusion operations, as bitmasks.  */
enum aarch64_fusion_pairs
{
  AARCH64_FUSE_NOTHING = 0,
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_ALL = (1u << AARCH64_FUSE_index_END) - 1
};

/* Same two-pass scheme for aarch64-tuning-flags.def.  */
#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name##_index,
/* Supported tuning flags indexes.  */
enum aarch64_extra_tuning_flags_index
{
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_index_END
};


#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name = (1u << AARCH64_EXTRA_TUNE_##name##_index),
/* Supported tuning flags, as bitmasks.  */
enum aarch64_extra_tuning_flags
{
  AARCH64_EXTRA_TUNE_NONE = 0,
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
};
652 
/* Enum describing the various ways that the
   aarch64_parse_{arch,tune,cpu,extension} functions can fail.
   This way their callers can choose what kind of error to give.  */

enum aarch64_parse_opt_result
{
  AARCH64_PARSE_OK,			/* Parsing was successful.  */
  AARCH64_PARSE_MISSING_ARG,		/* Missing argument.  */
  AARCH64_PARSE_INVALID_FEATURE,	/* Invalid feature modifier.  */
  AARCH64_PARSE_INVALID_ARG		/* Invalid arch, tune, cpu arg.  */
};
664 
/* Enum to distinguish which type of check is to be done in
   aarch64_simd_valid_immediate.  This is used as a bitmask where
   AARCH64_CHECK_MOV has both bits set.  Thus AARCH64_CHECK_MOV will
   perform all checks.  Adding new types would require changes accordingly.  */
enum simd_immediate_check {
  AARCH64_CHECK_ORR  = 1 << 0,	/* Valid for vector ORR immediate.  */
  AARCH64_CHECK_BIC  = 1 << 1,	/* Valid for vector BIC immediate.  */
  AARCH64_CHECK_MOV  = AARCH64_CHECK_ORR | AARCH64_CHECK_BIC
};
674 
/* The key type that -msign-return-address should use.  */
enum aarch64_key_type {
  AARCH64_KEY_A,
  AARCH64_KEY_B
};

/* The key selected for -msign-return-address.  */
extern enum aarch64_key_type aarch64_ra_sign_key;

/* The tuning parameters in effect for the current compilation.  */
extern struct tune_params aarch64_tune_params;
684 
/* The available SVE predicate patterns, known in the ACLE as "svpattern".
   Each T invocation is T (UPPER, lower, value); AARCH64_SVENUM below
   turns these into enumerators with the given values.  */
#define AARCH64_FOR_SVPATTERN(T) \
  T (POW2, pow2, 0) \
  T (VL1, vl1, 1) \
  T (VL2, vl2, 2) \
  T (VL3, vl3, 3) \
  T (VL4, vl4, 4) \
  T (VL5, vl5, 5) \
  T (VL6, vl6, 6) \
  T (VL7, vl7, 7) \
  T (VL8, vl8, 8) \
  T (VL16, vl16, 9) \
  T (VL32, vl32, 10) \
  T (VL64, vl64, 11) \
  T (VL128, vl128, 12) \
  T (VL256, vl256, 13) \
  T (MUL4, mul4, 29) \
  T (MUL3, mul3, 30) \
  T (ALL, all, 31)

/* The available SVE prefetch operations, known in the ACLE as "svprfop".  */
#define AARCH64_FOR_SVPRFOP(T) \
  T (PLDL1KEEP, pldl1keep, 0) \
  T (PLDL1STRM, pldl1strm, 1) \
  T (PLDL2KEEP, pldl2keep, 2) \
  T (PLDL2STRM, pldl2strm, 3) \
  T (PLDL3KEEP, pldl3keep, 4) \
  T (PLDL3STRM, pldl3strm, 5) \
  T (PSTL1KEEP, pstl1keep, 8) \
  T (PSTL1STRM, pstl1strm, 9) \
  T (PSTL2KEEP, pstl2keep, 10) \
  T (PSTL2STRM, pstl2strm, 11) \
  T (PSTL3KEEP, pstl3keep, 12) \
  T (PSTL3STRM, pstl3strm, 13)

/* Expand each list entry into AARCH64_SV_<UPPER> = <value>.  */
#define AARCH64_SVENUM(UPPER, LOWER, VALUE) AARCH64_SV_##UPPER = VALUE,
enum aarch64_svpattern {
  AARCH64_FOR_SVPATTERN (AARCH64_SVENUM)
  AARCH64_NUM_SVPATTERNS
};

enum aarch64_svprfop {
  AARCH64_FOR_SVPRFOP (AARCH64_SVENUM)
  AARCH64_NUM_SVPRFOPS
};
#undef AARCH64_SVENUM
731 
/* It's convenient to divide the built-in function codes into groups,
   rather than having everything in a single enum.  This type enumerates
   those groups.  */
enum aarch64_builtin_class
{
  AARCH64_BUILTIN_GENERAL,
  AARCH64_BUILTIN_SVE
};

/* Built-in function codes are structured so that the low
   AARCH64_BUILTIN_SHIFT bits contain the aarch64_builtin_class
   and the upper bits contain a group-specific subcode.  */
const unsigned int AARCH64_BUILTIN_SHIFT = 1;

/* Mask that selects the aarch64_builtin_class part of a function code.  */
const unsigned int AARCH64_BUILTIN_CLASS = (1 << AARCH64_BUILTIN_SHIFT) - 1;
748 
/* RAII class for enabling enough features to define built-in types
   and implement the arm_neon.h pragma.  The constructor saves the
   state recorded in the members below; being RAII, the destructor is
   expected to restore it.  */
class aarch64_simd_switcher
{
public:
  aarch64_simd_switcher (unsigned int extra_flags = 0);
  ~aarch64_simd_switcher ();

private:
  /* Saved ISA flags to reinstate on destruction.  */
  unsigned long m_old_isa_flags;
  /* Saved setting of the general-regs-only state.  */
  bool m_old_general_regs_only;
};
761 
762 void aarch64_post_cfi_startproc (void);
763 poly_int64 aarch64_initial_elimination_offset (unsigned, unsigned);
764 int aarch64_get_condition_code (rtx);
765 bool aarch64_address_valid_for_prefetch_p (rtx, bool);
766 bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
767 unsigned HOST_WIDE_INT aarch64_and_split_imm1 (HOST_WIDE_INT val_in);
768 unsigned HOST_WIDE_INT aarch64_and_split_imm2 (HOST_WIDE_INT val_in);
769 bool aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode);
770 int aarch64_branch_cost (bool, bool);
771 enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx);
772 bool aarch64_advsimd_struct_mode_p (machine_mode mode);
773 opt_machine_mode aarch64_vq_mode (scalar_mode);
774 opt_machine_mode aarch64_full_sve_mode (scalar_mode);
775 bool aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode);
776 bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
777 bool aarch64_const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT,
778 					    HOST_WIDE_INT);
779 bool aarch64_constant_address_p (rtx);
780 bool aarch64_emit_approx_div (rtx, rtx, rtx);
781 bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
782 tree aarch64_vector_load_decl (tree);
783 void aarch64_expand_call (rtx, rtx, rtx, bool);
784 bool aarch64_expand_cpymem_mops (rtx *, bool);
785 bool aarch64_expand_cpymem (rtx *);
786 bool aarch64_expand_setmem (rtx *);
787 bool aarch64_float_const_zero_rtx_p (rtx);
788 bool aarch64_float_const_rtx_p (rtx);
789 bool aarch64_function_arg_regno_p (unsigned);
790 bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
791 bool aarch64_gen_cpymemqi (rtx *);
792 bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx);
793 bool aarch64_is_long_call_p (rtx);
794 bool aarch64_is_noplt_call_p (rtx);
795 bool aarch64_label_mentioned_p (rtx);
796 void aarch64_declare_function_name (FILE *, const char*, tree);
797 void aarch64_asm_output_alias (FILE *, const tree, const tree);
798 void aarch64_asm_output_external (FILE *, tree, const char*);
799 bool aarch64_legitimate_pic_operand_p (rtx);
800 bool aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode, rtx, rtx);
801 bool aarch64_masks_and_shift_for_bfi_p (scalar_int_mode, unsigned HOST_WIDE_INT,
802 					unsigned HOST_WIDE_INT,
803 					unsigned HOST_WIDE_INT);
/* Predicates and helpers for immediates, offsets and SVE modes used by
   the machine-description patterns.  NOTE(review): presumably defined in
   aarch64.cc per GCC back-end convention -- confirm.  */
bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
machine_mode aarch64_sve_int_mode (machine_mode);
opt_machine_mode aarch64_sve_pred_mode (unsigned int);
machine_mode aarch64_sve_pred_mode (machine_mode);
opt_machine_mode aarch64_sve_data_mode (scalar_mode, poly_uint64);
bool aarch64_sve_mode_p (machine_mode);
HOST_WIDE_INT aarch64_fold_sve_cnt_pat (aarch64_svpattern, unsigned int);
bool aarch64_sve_cnt_immediate_p (rtx);
bool aarch64_sve_scalar_inc_dec_immediate_p (rtx);
bool aarch64_sve_addvl_addpl_immediate_p (rtx);
bool aarch64_sve_vector_inc_dec_immediate_p (rtx);
int aarch64_add_offset_temporaries (rtx);
void aarch64_split_add_offset (scalar_int_mode, rtx, rtx, rtx, rtx, rtx);
bool aarch64_mov_operand_p (rtx, machine_mode);
rtx aarch64_reverse_mask (machine_mode, unsigned int);
bool aarch64_offset_7bit_signed_scaled_p (machine_mode, poly_int64);
bool aarch64_offset_9bit_signed_unscaled_p (machine_mode, poly_int64);
/* Asm-template builders (aarch64_output_* convention): each returns the
   text of the instruction(s) to emit for the given operands.  */
char *aarch64_output_sve_prefetch (const char *, rtx, const char *);
char *aarch64_output_sve_cnt_immediate (const char *, const char *, rtx);
char *aarch64_output_sve_cnt_pat_immediate (const char *, const char *, rtx *);
char *aarch64_output_sve_scalar_inc_dec (rtx);
char *aarch64_output_sve_addvl_addpl (rtx);
char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
char *aarch64_output_simd_mov_immediate (rtx, unsigned,
			enum simd_immediate_check w = AARCH64_CHECK_MOV);
char *aarch64_output_sve_mov_immediate (rtx);
char *aarch64_output_sve_ptrues (rtx);
bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
bool aarch64_regno_ok_for_base_p (int, bool);
bool aarch64_regno_ok_for_index_p (int, bool);
bool aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *fail);
bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
					    bool high);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
			enum simd_immediate_check w = AARCH64_CHECK_MOV);
/* Validity checks for SVE immediate operands, grouped by the instruction
   class named in each function.  */
rtx aarch64_check_zero_based_sve_index_immediate (rtx);
bool aarch64_sve_index_immediate_p (rtx);
bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
bool aarch64_sve_bitmask_immediate_p (rtx);
bool aarch64_sve_dup_immediate_p (rtx);
bool aarch64_sve_cmp_immediate_p (rtx, bool);
bool aarch64_sve_float_arith_immediate_p (rtx, bool);
bool aarch64_sve_float_mul_immediate_p (rtx);
bool aarch64_split_dimode_const_store (rtx, rtx);
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
int aarch64_movk_shift (const wide_int_ref &, const wide_int_ref &);
bool aarch64_use_return_insn_p (void);
const char *aarch64_output_casesi (rtx *);
859 
/* Symbol classification, register-class queries and RTL expansion
   entry points.  */
unsigned int aarch64_tlsdesc_abi_id ();
enum aarch64_symbol_type aarch64_classify_symbol (rtx, HOST_WIDE_INT);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
int aarch64_asm_preferred_eh_data_format (int, int);
int aarch64_fpconst_pow_of_2 (rtx);
int aarch64_fpconst_pow2_recip (rtx);
machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
						       machine_mode);
int aarch64_uxt_size (int, HOST_WIDE_INT);
int aarch64_vec_fpconst_pow_of_2 (rtx);
rtx aarch64_eh_return_handler_rtx (void);
rtx aarch64_mask_from_zextract_ops (rtx, rtx);
const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr_rtx (void);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
rtx aarch64_gen_shareable_zero (machine_mode);
/* Operand predicates for SIMD and SVE memory forms (ld1r, ld1rq, ldff1,
   ldnf1, ldr, prefetch, ...).  */
bool aarch64_simd_mem_operand_p (rtx);
bool aarch64_sve_ld1r_operand_p (rtx);
bool aarch64_sve_ld1rq_operand_p (rtx);
bool aarch64_sve_ld1ro_operand_p (rtx, scalar_mode);
bool aarch64_sve_ldff1_operand_p (rtx);
bool aarch64_sve_ldnf1_operand_p (rtx);
bool aarch64_sve_ldr_operand_p (rtx);
bool aarch64_sve_prefetch_operand_p (rtx, machine_mode);
bool aarch64_sve_struct_memory_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
rtx aarch64_gen_stepped_int_parallel (unsigned int, int, int);
bool aarch64_stepped_int_parallel_p (rtx, int);
rtx aarch64_tls_get_addr (void);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_cpu_cpp_builtins (cpp_reader *);
const char * aarch64_gen_far_branch (rtx *, int, const char *, const char *);
const char * aarch64_output_probe_stack_range (rtx, rtx);
const char * aarch64_output_probe_sve_stack_clash (rtx, rtx, rtx, rtx);
void aarch64_err_no_fpadvsimd (machine_mode);
void aarch64_expand_epilogue (bool);
rtx aarch64_ptrue_all (unsigned int);
opt_machine_mode aarch64_ptrue_all_mode (rtx);
rtx aarch64_convert_sve_data_to_pred (rtx, machine_mode, rtx);
rtx aarch64_expand_sve_dupq (rtx, machine_mode, rtx);
void aarch64_expand_mov_immediate (rtx, rtx);
rtx aarch64_stack_protect_canary_mem (machine_mode, rtx, aarch64_salt_type);
rtx aarch64_ptrue_reg (machine_mode);
rtx aarch64_pfalse_reg (machine_mode);
bool aarch64_sve_same_pred_for_ptest_p (rtx *, rtx *);
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
void aarch64_expand_sve_mem_move (rtx, rtx, machine_mode);
bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
rtx aarch64_replace_reg_mode (rtx, machine_mode);
void aarch64_split_sve_subreg_move (rtx, rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_sve_expand_vector_init (rtx, rtx);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
				   const_tree, unsigned, bool = false);
void aarch64_init_expanders (void);
void aarch64_init_simd_builtins (void);
void aarch64_emit_call_insn (rtx);
void aarch64_register_pragmas (void);
void aarch64_relayout_simd_types (void);
void aarch64_reset_previous_fndecl (void);
bool aarch64_return_address_signing_enabled (void);
bool aarch64_bti_enabled (void);
void aarch64_save_restore_target_globals (tree);
/* Scratch-register plumbing for double-word add/subtract expansions
   (NOTE(review): "ti"/"vti" presumably means TImode -- confirm in the
   definitions).  */
void aarch64_addti_scratch_regs (rtx, rtx, rtx *,
				 rtx *, rtx *,
				 rtx *, rtx *,
				 rtx *);
void aarch64_subvti_scratch_regs (rtx, rtx, rtx *,
				  rtx *, rtx *,
				  rtx *, rtx *, rtx *);
void aarch64_expand_subvti (rtx, rtx, rtx,
			    rtx, rtx, rtx, rtx, bool);
937 
938 
/* Initialize builtins for SIMD intrinsics.  */
void init_aarch64_simd_builtins (void);

void aarch64_simd_emit_reg_reg_move (rtx *, machine_mode, unsigned int);

/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);

void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
rtx aarch64_endian_lane_rtx (machine_mode, unsigned int);

/* Helpers for splitting 128-bit moves into smaller operations.  */
void aarch64_split_128bit_move (rtx, rtx);

bool aarch64_split_128bit_move_p (rtx, rtx);

bool aarch64_mov128_immediate (rtx);

void aarch64_split_simd_move (rtx, rtx);

/* Check for a legitimate floating point constant for FMOV.  */
bool aarch64_float_const_representable_p (rtx);

extern int aarch64_epilogue_uses (int);
962 
/* The declarations below use RTL-only types (RTX_CODE / rtx_code), so
   they are only visible to files that include rtl.h first.  */
#if defined (RTX_CODE)
void aarch64_gen_unlikely_cbranch (enum rtx_code, machine_mode cc_mode,
				   rtx label_ref);
bool aarch64_legitimate_address_p (machine_mode, rtx, bool,
				   aarch64_addr_query_type = ADDR_QUERY_M);
machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
bool aarch64_maxmin_plus_const (rtx_code, rtx *, bool);
rtx aarch64_load_tp (rtx);

/* Atomic compare-and-swap and read-modify-write expansion/splitting.  */
void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);

void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);

bool aarch64_gen_adjusted_ldpstp (rtx *, bool, machine_mode, RTX_CODE);

/* SVE comparison, vcond and FMA expansion helpers.  */
void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx);
bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
void aarch64_expand_sve_vcond (machine_mode, machine_mode, rtx *);

bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
#endif /* RTX_CODE */
987 
/* Target attribute/option processing and the "general" (non-SVE)
   builtins interface.  NOTE(review): the aarch64_general_* functions
   are presumably defined in aarch64-builtins.cc -- confirm.  */
bool aarch64_process_target_attr (tree);
void aarch64_override_options_internal (struct gcc_options *);

const char *aarch64_general_mangle_builtin_type (const_tree);
void aarch64_general_init_builtins (void);
tree aarch64_general_fold_builtin (unsigned int, tree, unsigned int, tree *);
gimple *aarch64_general_gimple_fold_builtin (unsigned int, gcall *,
					     gimple_stmt_iterator *);
rtx aarch64_general_expand_builtin (unsigned int, tree, rtx, int);
tree aarch64_general_builtin_decl (unsigned, bool);
tree aarch64_general_builtin_rsqrt (unsigned int);
tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);
void handle_arm_acle_h (void);
void handle_arm_neon_h (void);
1002 
/* Interface to the SVE intrinsics (arm_sve.h) support code.
   NOTE(review): presumably implemented by the aarch64-sve-builtins
   sources -- confirm.  */
namespace aarch64_sve {
  void init_builtins ();
  void handle_arm_sve_h ();
  tree builtin_decl (unsigned, bool);
  bool builtin_type_p (const_tree);
  bool builtin_type_p (const_tree, unsigned int *, unsigned int *);
  const char *mangle_builtin_type (const_tree);
  tree resolve_overloaded_builtin (location_t, unsigned int,
				   vec<tree, va_gc> *);
  bool check_builtin_call (location_t, vec<location_t>, unsigned int,
			   tree, unsigned int, tree *);
  gimple *gimple_fold_builtin (unsigned int, gimple_stmt_iterator *, gcall *);
  rtx expand_builtin (unsigned int, tree, rtx);
  tree handle_arm_sve_vector_bits_attribute (tree *, tree, tree, int, bool *);
/* Guarded because the signature uses type_context_kind, which is only
   available once target.h has been included.  */
#ifdef GCC_TARGET_H
  bool verify_type_context (location_t, type_context_kind, const_tree, bool);
#endif
}
1021 
extern void aarch64_split_combinev16qi (rtx operands[3]);
extern void aarch64_expand_vec_perm (rtx, rtx, rtx, rtx, unsigned int);
extern void aarch64_expand_sve_vec_perm (rtx, rtx, rtx, rtx);
extern bool aarch64_madd_needs_nop (rtx_insn *);
extern void aarch64_final_prescan_insn (rtx_insn *);
void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *);
int aarch64_ccmp_mode_to_code (machine_mode mode);

/* Helpers for forming load/store-pair (ldp/stp) instructions.  */
bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset);
bool aarch64_mergeable_load_pair_p (machine_mode, rtx, rtx);
bool aarch64_operands_ok_for_ldpstp (rtx *, bool, machine_mode);
bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, machine_mode);
void aarch64_swap_ldrstr_operands (rtx *, bool);

extern void aarch64_asm_output_pool_epilogue (FILE *, const char *,
					      tree, HOST_WIDE_INT);


extern bool aarch64_classify_address (struct aarch64_address_info *, rtx,
				      machine_mode, bool,
				      aarch64_addr_query_type = ADDR_QUERY_M);
1043 
/* Defined in common/config/aarch64-common.cc.  */
/* Command-line option handling and ISA-extension string parsing.  */
bool aarch64_handle_option (struct gcc_options *, struct gcc_options *,
			     const struct cl_decoded_option *, location_t);
const char *aarch64_rewrite_selected_cpu (const char *name);
enum aarch64_parse_opt_result aarch64_parse_extension (const char *,
						       uint64_t *,
						       std::string *);
void aarch64_get_all_extension_candidates (auto_vec<const char *> *candidates);
std::string aarch64_get_extension_string_for_isa_flags (uint64_t, uint64_t);
1053 
/* Constructors for the aarch64-specific RTL passes.  */
rtl_opt_pass *make_pass_fma_steering (gcc::context *);
rtl_opt_pass *make_pass_track_speculation (gcc::context *);
rtl_opt_pass *make_pass_tag_collision_avoidance (gcc::context *);
rtl_opt_pass *make_pass_insert_bti (gcc::context *ctxt);
rtl_opt_pass *make_pass_cc_fusion (gcc::context *ctxt);

poly_uint64 aarch64_regmode_natural_size (machine_mode);

bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
1063 
/* Table of symbol names for the out-of-line ("ool") atomic helper
   functions; looked up by aarch64_atomic_ool_func.  NOTE(review): the
   two dimensions presumably index access size and memory model --
   confirm against the definitions of the aarch64_ool_*_names tables.  */
struct atomic_ool_names
{
    const char *str[5][5];
};
1068 
rtx aarch64_atomic_ool_func(machine_mode mode, rtx model_rtx,
			    const atomic_ool_names *names);
/* Predefined name tables for the out-of-line atomic helpers
   (swap, load-add, load-set, load-clear, load-eor).  */
extern const atomic_ool_names aarch64_ool_swp_names;
extern const atomic_ool_names aarch64_ool_ldadd_names;
extern const atomic_ool_names aarch64_ool_ldset_names;
extern const atomic_ool_names aarch64_ool_ldclr_names;
extern const atomic_ool_names aarch64_ool_ldeor_names;

tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *);

/* Straight-line-speculation (SLS) mitigation helpers.  */
const char *aarch64_sls_barrier (int);
const char *aarch64_indirect_call_asm (rtx);
extern bool aarch64_harden_sls_retbr_p (void);
extern bool aarch64_harden_sls_blr_p (void);

extern void aarch64_output_patchable_area (unsigned int, bool);

#endif /* GCC_AARCH64_PROTOS_H */
1087