xref: /netbsd-src/external/gpl3/gcc/dist/gcc/ira-int.h (revision b1e838363e3c6fc78a55519254d99869742dd33c)
1 /* Integrated Register Allocator (IRA) intercommunication header file.
2    Copyright (C) 2006-2022 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #ifndef GCC_IRA_INT_H
22 #define GCC_IRA_INT_H
23 
24 #include "recog.h"
25 #include "function-abi.h"
26 
27 /* To provide consistency in naming, all IRA external variables,
28    functions, common typedefs start with prefix ira_.  */
29 
30 #if CHECKING_P
31 #define ENABLE_IRA_CHECKING
32 #endif
33 
34 #ifdef ENABLE_IRA_CHECKING
35 #define ira_assert(c) gcc_assert (c)
36 #else
37 /* Always define and include C, so that warnings for empty body in an
38   'if' statement and unused variable do not occur.  */
39 #define ira_assert(c) ((void)(0 && (c)))
40 #endif
41 
42 /* Compute register frequency from edge frequency FREQ.  It is
43    analogous to REG_FREQ_FROM_BB.  When optimizing for size, or when
44    profile driven feedback is available and the function is never
45    executed, the frequency is the same for every register (REG_FREQ_MAX).
46    Otherwise rescale the edge frequency, with a minimum of 1.  */
47 #define REG_FREQ_FROM_EDGE_FREQ(freq)				   \
48   (optimize_function_for_size_p (cfun)				   \
49    ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX)		   \
50    ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
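/* For example (illustrative values only; REG_FREQ_MAX and BB_FREQ_MAX are
   assumed here to be 1000 and 10000): when not optimizing for size, an edge
   frequency of 2500 rescales to 2500 * 1000 / 10000 == 250, while an edge
   frequency of 0 still yields 1, so the result is never zero.  */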
51 
52 /* A modified value of flag `-fira-verbose' used internally.  */
53 extern int internal_flag_ira_verbose;
54 
55 /* Dump file of the allocator if it is not NULL.  */
56 extern FILE *ira_dump_file;
57 
58 /* Typedefs for pointers to allocno live range, allocno, and copy of
59    allocnos.  */
60 typedef struct live_range *live_range_t;
61 typedef struct ira_allocno *ira_allocno_t;
62 typedef struct ira_allocno_pref *ira_pref_t;
63 typedef struct ira_allocno_copy *ira_copy_t;
64 typedef struct ira_object *ira_object_t;
65 
66 /* Definition of vector of allocnos and copies.  */
67 
68 /* Typedef for pointer to the subsequent structure.  */
69 typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
70 
71 typedef unsigned short move_table[N_REG_CLASSES];
72 
73 /* In general case, IRA is a regional allocator.  The regions are
74    nested and form a tree.  Currently regions are natural loops.  The
75    following structure describes loop tree node (representing basic
76    block or loop).  We need such tree because the loop tree from
77    cfgloop.h is not convenient for the optimization: basic blocks are
78    not a part of the tree from cfgloop.h.  We also use the nodes for
79    storing additional information about basic blocks/loops for the
80    register allocation purposes.  */
81 struct ira_loop_tree_node
82 {
83   /* The node represents basic block if children == NULL.  */
84   basic_block bb;    /* NULL for loop.  */
85   /* NULL for BB or for loop tree root if we did not build CFG loop tree.  */
86   class loop *loop;
87   /* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
88      SUBLOOP_NEXT is always NULL for BBs.  */
89   ira_loop_tree_node_t subloop_next, next;
90   /* CHILDREN/SUBLOOPS is the first node/loop-node immediately inside
91      the node.  They are NULL for BBs.  */
92   ira_loop_tree_node_t subloops, children;
93   /* The node immediately containing given node.  */
94   ira_loop_tree_node_t parent;
95 
96   /* Loop level in range [0, ira_loop_tree_height).  */
97   int level;
98 
99   /* All the following members are defined only for nodes representing
100      loops.  */
101 
102   /* The loop number from CFG loop tree.  The root number is 0.  */
103   int loop_num;
104 
105   /* True if the loop was marked for removal from the register
106      allocation.  */
107   bool to_remove_p;
108 
109   /* Allocnos in the loop corresponding to their regnos.  If it is
110      NULL the loop does not form a separate register allocation region
111      (e.g. because it has abnormal enter/exit edges and we cannot put
112      code for register shuffling on the edges if a different
113      allocation is used for a pseudo-register on different sides of
114      the edges).  Caps are not in the map (remember we can have more
115      than one cap with the same regno in a region).  */
116   ira_allocno_t *regno_allocno_map;
117 
118   /* True if there is an entry to given loop not from its parent (or
119      grandparent) basic block.  For example, this can happen for two
120      adjacent loops inside another loop.  */
121   bool entered_from_non_parent_p;
122 
123   /* Maximal register pressure inside loop for given register class
124      (defined only for the pressure classes).  */
125   int reg_pressure[N_REG_CLASSES];
126 
127   /* Numbers of allocnos referred to or living in the loop node (except
128      for its subloops).  */
129   bitmap all_allocnos;
130 
131   /* Numbers of allocnos living at the loop borders.  */
132   bitmap border_allocnos;
133 
134   /* Regnos of pseudos modified in the loop node (including its
135      subloops).  */
136   bitmap modified_regnos;
137 
138   /* Numbers of copies referred to in the corresponding loop.  */
139   bitmap local_copies;
140 };
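
/* Illustrative sketch only (not part of the IRA API): how the CHILDREN/NEXT
   links above are meant to be walked.  Real IRA code normally uses
   ira_traverse_loop_tree instead of open-coding such a walk.  */
static inline int
ira_sketch_count_bb_nodes (ira_loop_tree_node_t node)
{
  /* A node represents a basic block iff children == NULL (see above).  */
  int n = node->bb != NULL ? 1 : 0;

  for (ira_loop_tree_node_t child = node->children;
       child != NULL;
       child = child->next)
    n += ira_sketch_count_bb_nodes (child);
  return n;
}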
141 
142 /* The root of the loop tree corresponding to the whole function.  */
143 extern ira_loop_tree_node_t ira_loop_tree_root;
144 
145 /* Height of the loop tree.  */
146 extern int ira_loop_tree_height;
147 
148 /* All nodes representing basic blocks are referred to through the
149    following array.  We cannot use basic block member `aux' for this
150    because it is used for insertion of insns on edges.  */
151 extern ira_loop_tree_node_t ira_bb_nodes;
152 
153 /* Two access macros to the nodes representing basic blocks.  */
154 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
155 #define IRA_BB_NODE_BY_INDEX(index) __extension__			\
156 (({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]);		\
157      if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
158        {								\
159          fprintf (stderr,						\
160                   "\n%s: %d: error in %s: it is not a block node\n",	\
161                   __FILE__, __LINE__, __FUNCTION__);			\
162          gcc_unreachable ();						\
163        }								\
164      _node; }))
165 #else
166 #define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
167 #endif
168 
169 #define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
170 
171 /* All nodes representing loops are referred to through the following
172    array.  */
173 extern ira_loop_tree_node_t ira_loop_nodes;
174 
175 /* Two access macros to the nodes representing loops.  */
176 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
177 #define IRA_LOOP_NODE_BY_INDEX(index) __extension__			\
178 (({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]);	\
179      if (_node->children == NULL || _node->bb != NULL			\
180          || (_node->loop == NULL && current_loops != NULL))		\
181        {								\
182          fprintf (stderr,						\
183                   "\n%s: %d: error in %s: it is not a loop node\n",	\
184                   __FILE__, __LINE__, __FUNCTION__);			\
185          gcc_unreachable ();						\
186        }								\
187      _node; }))
188 #else
189 #define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
190 #endif
191 
192 #define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
193 
194 
195 /* The structure describes program points where a given allocno lives.
196    If the live ranges of two allocnos intersect, the allocnos
197    are in conflict.  */
198 struct live_range
199 {
200   /* Object whose live range is described by given structure.  */
201   ira_object_t object;
202   /* Program point range.  */
203   int start, finish;
204   /* Next structure describing program points where the allocno
205      lives.  */
206   live_range_t next;
207   /* Pointer to structures with the same start/finish.  */
208   live_range_t start_next, finish_next;
209 };
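
/* Illustrative sketch only (not part of the IRA API): walking a live range
   list through the NEXT links above.  It relies on the invariant, documented
   for ira_object below, that the ranges in a list do not intersect.  */
static inline int
ira_sketch_live_range_list_points (live_range_t r)
{
  int n = 0;

  for (; r != NULL; r = r->next)
    n += r->finish - r->start + 1;
  return n;
}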
210 
211 /* Program points are enumerated by numbers from range
212    0..IRA_MAX_POINT-1.  There are approximately two times more program
213    points than insns.  Program points are places in the program where
214    liveness info can be changed.  In the most general case (there are
215    more complicated cases too) some program points correspond to places
216    where an input operand dies and others correspond to places where
217    output operands are born.  */
218 extern int ira_max_point;
219 
220 /* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
221    live ranges with given start/finish point.  */
222 extern live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
223 
224 /* A structure representing conflict information for an allocno
225    (or one of its subwords).  */
226 struct ira_object
227 {
228   /* The allocno associated with this record.  */
229   ira_allocno_t allocno;
230   /* Vector of accumulated conflicting objects with a NULL end
231      marker (if OBJECT_CONFLICT_VEC_P is true), or a conflict bit vector
232      otherwise.  */
233   void *conflicts_array;
234   /* Pointer to structures describing at what program point the
235      object lives.  We always maintain the list in such a way that *the
236      ranges in the list do not intersect and are ordered by decreasing
237      program points*.  */
238   live_range_t live_ranges;
239   /* The subword within ALLOCNO which is represented by this object.
240      Zero means the lowest-order subword (or the entire allocno in case
241      it is not being tracked in subwords).  */
242   int subword;
243   /* Allocated size of the conflicts array.  */
244   unsigned int conflicts_array_size;
245   /* A unique number for every instance of this structure, which is used
246      to represent it in conflict bit vectors.  */
247   int id;
248   /* Before building conflicts, MIN and MAX are initialized to
249      correspondingly minimal and maximal points of the accumulated
250      live ranges.  Afterwards, they hold the minimal and maximal ids
251      of other ira_objects that this one can conflict with.  */
252   int min, max;
253   /* Initial and accumulated hard registers conflicting with this
254      object, which as a consequence cannot be assigned to the allocno.
255      All non-allocatable hard regs and hard regs of register classes
256      other than the allocno's class are included in the sets.  */
257   HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
258   /* Number of accumulated conflicts in the vector of conflicting
259      objects.  */
260   int num_accumulated_conflicts;
261   /* TRUE if conflicts are represented by a vector of pointers to
262      ira_object structures.  Otherwise, we use a bit vector indexed
263      by conflict ID numbers.  */
264   unsigned int conflict_vec_p : 1;
265 };
266 
267 /* A structure representing an allocno (allocation entity).  Allocno
268    represents a pseudo-register in an allocation region.  If
269    pseudo-register does not live in a region itself but lives in its
270    nested regions, it is represented in the region by a special allocno
271    called a *cap*.  There may be more than one cap representing the same
272    pseudo-register in a region.  It means that the corresponding
273    pseudo-register lives in more than one non-intersecting subregion.  */
274 struct ira_allocno
275 {
276   /* The allocno order number starting with 0.  Each allocno has a
277      unique number and the number is never changed for the
278      allocno.  */
279   int num;
280   /* Regno for allocno or cap.  */
281   int regno;
282   /* Mode of the allocno which is the mode of the corresponding
283      pseudo-register.  */
284   ENUM_BITFIELD (machine_mode) mode : 8;
285   /* Widest mode of the allocno, which can differ from MODE in at least
286      one case: paradoxical subregs, where wmode > mode.  */
287   ENUM_BITFIELD (machine_mode) wmode : 8;
288   /* Register class which should be used for allocation for given
289      allocno.  NO_REGS means that we should use memory.  */
290   ENUM_BITFIELD (reg_class) aclass : 16;
291   /* A bitmask of the ABIs used by calls that occur while the allocno
292      is live.  */
293   unsigned int crossed_calls_abis : NUM_ABI_IDS;
294   /* During the reload, value TRUE means that we should not reassign a
295      hard register to the allocno that got memory earlier.  It is set up
296      when we remove memory-memory move insns before each iteration of
297      the reload.  */
298   unsigned int dont_reassign_p : 1;
299 #ifdef STACK_REGS
300   /* Set to TRUE if the allocno can't be assigned to a stack hard
301      register, correspondingly in this region alone and in the area
302      including the region and all its subregions recursively.  */
303   unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
304 #endif
305   /* A TRUE value means that it makes no sense to spill the allocno
306      during coloring because the spill would only result in additional
307      reloads in the reload pass.  */
308   unsigned int bad_spill_p : 1;
309   /* TRUE if a hard register or memory has been assigned to the
310      allocno.  */
311   unsigned int assigned_p : 1;
312   /* TRUE if conflicts for given allocno are represented by vector of
313      pointers to the conflicting allocnos.  Otherwise, we use a bit
314      vector where a bit with given index represents allocno with the
315      same number.  */
316   unsigned int conflict_vec_p : 1;
317   /* True if the parent loop has an allocno for the same register and
318      if the parent allocno's assignment might not be valid in this loop.
319      This means that we cannot merge this allocno and the parent allocno
320      together.
321 
322      This is only ever true for non-cap allocnos.  */
323   unsigned int might_conflict_with_parent_p : 1;
324   /* Hard register assigned to given allocno.  Negative value means
325      that memory was allocated to the allocno.  During the reload,
326      spilled allocno has value equal to the corresponding stack slot
327      number (0, ...) - 2.  Value -1 is used for allocnos spilled by the
328      reload (at this point pseudo-register has only one allocno) which
329      did not get stack slot yet.  */
330   signed int hard_regno : 16;
331   /* Allocnos with the same regno are linked by the following member.
332      Allocnos corresponding to inner loops are first in the list (it
333      corresponds to a depth-first traversal of the loops).  */
334   ira_allocno_t next_regno_allocno;
335   /* There may be different allocnos with the same regno in different
336      regions.  Allocnos are bound to the corresponding loop tree node.
337      Pseudo-register may have only one regular allocno with given loop
338      tree node but more than one cap (see comments above).  */
339   ira_loop_tree_node_t loop_tree_node;
340   /* Accumulated usage references of the allocno.  Here and below,
341      word 'accumulated' means info for given region and all nested
342      subregions.  In this case, 'accumulated' means sum of references
343      of the corresponding pseudo-register in this region and in all
344      nested subregions recursively. */
345   int nrefs;
346   /* Accumulated frequency of usage of the allocno.  */
347   int freq;
348   /* Minimal accumulated and updated costs of using a register of the
349      allocno class.  */
350   int class_cost, updated_class_cost;
351   /* Minimal accumulated, and updated costs of memory for the allocno.
352      At the allocation start, the original and updated costs are
353      equal.  The updated cost may be changed after finishing
354      allocation in a region and starting allocation in a subregion.
355      The change reflects the cost of spill/restore code on the
356      subregion border if we assign memory to the pseudo in the
357      subregion.  */
358   int memory_cost, updated_memory_cost;
359   /* Accumulated number of points where the allocno lives and there is
360      excess pressure for its class.  Excess pressure for a register
361      class at some point means that there are more allocnos of given
362      register class living at the point than the number of hard registers
363      of the class available for the allocation.  */
364   int excess_pressure_points_num;
365   /* Allocno hard reg preferences.  */
366   ira_pref_t allocno_prefs;
367   /* Copies to other non-conflicting allocnos.  The copies can
368      represent a move insn or a potential move insn, usually arising
369      from two-operand insn constraints.  */
370   ira_copy_t allocno_copies;
371   /* It is an allocno (cap) representing the given allocno at the upper
372      loop tree level.  */
373   ira_allocno_t cap;
374   /* It is a link to the allocno (possibly a cap) on the lower loop level
375      represented by the given cap.  Null if the given allocno is not a cap.  */
376   ira_allocno_t cap_member;
377   /* The number of objects tracked in the following array.  */
378   int num_objects;
379   /* An array of structures describing conflict information and live
380      ranges for each object associated with the allocno.  There may be
381      more than one such object in cases where the allocno represents a
382      multi-word register.  */
383   ira_object_t objects[2];
384   /* Accumulated frequency of calls which given allocno
385      intersects.  */
386   int call_freq;
387   /* Accumulated number of the intersected calls.  */
388   int calls_crossed_num;
389   /* The number of calls across which it is live, but which should not
390      affect register preferences.  */
391   int cheap_calls_crossed_num;
392   /* Registers clobbered by intersected calls.  */
393   HARD_REG_SET crossed_calls_clobbered_regs;
394   /* Array of usage costs (accumulated and the one updated during
395      coloring) for each hard register of the allocno class.  The
396      member value can be NULL if all costs are the same and equal to
397      CLASS_COST.  For example, the costs of two different hard
398      registers can be different if one hard register is callee-saved
399      and another one is callee-used and the allocno lives through
400      calls.  Another example can be case when for some insn the
401      calls.  Another example is the case when for some insn the
402      corresponding pseudo-register value should be put in a specific
403      the allocno class (GENERAL_REGS for x86).  We have updated costs
404      to reflect the situation when the usage cost of a hard register
405      is decreased because the allocno is connected to another allocno
406      by a copy and the another allocno has been assigned to the hard
407      by a copy and the other allocno has been assigned to the hard
408   int *hard_reg_costs, *updated_hard_reg_costs;
409   /* Array of decreasing costs (accumulated and the one updated during
410      coloring), indexed by hard register of the allocno class, for the
411      allocnos conflicting with the given allocno.  The member value can
412      be NULL if all costs are the same.  These costs are used to reflect
413      preferences of other allocnos not assigned yet when assigning to the given
414      allocno.  */
415   int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
416   /* Different additional data.  It is used to decrease size of
417      allocno data footprint.  */
418   void *add_data;
419 };
420 
421 
422 /* All members of the allocno structures should be accessed only
423    through the following macros.  */
424 #define ALLOCNO_NUM(A) ((A)->num)
425 #define ALLOCNO_REGNO(A) ((A)->regno)
426 #define ALLOCNO_REG(A) ((A)->reg)
427 #define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
428 #define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
429 #define ALLOCNO_CAP(A) ((A)->cap)
430 #define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
431 #define ALLOCNO_NREFS(A) ((A)->nrefs)
432 #define ALLOCNO_FREQ(A) ((A)->freq)
433 #define ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P(A) \
434   ((A)->might_conflict_with_parent_p)
435 #define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
436 #define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
437 #define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
438 #define ALLOCNO_CHEAP_CALLS_CROSSED_NUM(A) ((A)->cheap_calls_crossed_num)
439 #define ALLOCNO_CROSSED_CALLS_ABIS(A) ((A)->crossed_calls_abis)
440 #define ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS(A) \
441   ((A)->crossed_calls_clobbered_regs)
442 #define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
443 #define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
444 #define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
445 #define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
446 #define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
447 #ifdef STACK_REGS
448 #define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
449 #define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
450 #endif
451 #define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
452 #define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
453 #define ALLOCNO_MODE(A) ((A)->mode)
454 #define ALLOCNO_WMODE(A) ((A)->wmode)
455 #define ALLOCNO_PREFS(A) ((A)->allocno_prefs)
456 #define ALLOCNO_COPIES(A) ((A)->allocno_copies)
457 #define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
458 #define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
459 #define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
460   ((A)->conflict_hard_reg_costs)
461 #define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
462   ((A)->updated_conflict_hard_reg_costs)
463 #define ALLOCNO_CLASS(A) ((A)->aclass)
464 #define ALLOCNO_CLASS_COST(A) ((A)->class_cost)
465 #define ALLOCNO_UPDATED_CLASS_COST(A) ((A)->updated_class_cost)
466 #define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
467 #define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
468 #define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) \
469   ((A)->excess_pressure_points_num)
470 #define ALLOCNO_OBJECT(A,N) ((A)->objects[N])
471 #define ALLOCNO_NUM_OBJECTS(A) ((A)->num_objects)
472 #define ALLOCNO_ADD_DATA(A) ((A)->add_data)
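
/* Illustrative sketch only (not part of the IRA API): reading the
   per-hard-register cost vector through the accessors above.  As documented
   for hard_reg_costs, a NULL vector means every hard register of the allocno
   class has cost ALLOCNO_CLASS_COST.  I is assumed to be an index into the
   hard registers of the allocno class.  */
static inline int
ira_sketch_allocno_hard_reg_cost (ira_allocno_t a, int i)
{
  int *costs = ALLOCNO_HARD_REG_COSTS (a);

  return costs == NULL ? ALLOCNO_CLASS_COST (a) : costs[i];
}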
473 
474 /* Typedef for pointer to the subsequent structure.  */
475 typedef struct ira_emit_data *ira_emit_data_t;
476 
477 /* Allocno-bound data used for emitting pseudo live range split insns
478    and for flattening the IR.  */
479 struct ira_emit_data
480 {
481   /* TRUE if the allocno assigned to memory was a destination of
482      removed move (see ira-emit.cc) at loop exit because the value of
483      the corresponding pseudo-register is not changed inside the
484      loop.  */
485   unsigned int mem_optimized_dest_p : 1;
486   /* TRUE if the corresponding pseudo-register has disjoint live
487      ranges and the other allocnos of the pseudo-register (all except
488      this one) got a new REG.  */
489   unsigned int somewhere_renamed_p : 1;
490   /* TRUE if allocno with the same REGNO in a subregion has been
491      renamed, in other words, got a new pseudo-register.  */
492   unsigned int child_renamed_p : 1;
493   /* Final rtx representation of the allocno.  */
494   rtx reg;
495   /* Non-NULL if we removed the insn restoring the value from the given
496      allocno to MEM_OPTIMIZED_DEST at loop exit (see ira-emit.cc) because
497      the allocno value is not changed inside the loop.  */
498   ira_allocno_t mem_optimized_dest;
499 };
500 
501 #define ALLOCNO_EMIT_DATA(a) ((ira_emit_data_t) ALLOCNO_ADD_DATA (a))
502 
503 /* Data used to emit live range split insns and to flatten the IR.  */
504 extern ira_emit_data_t ira_allocno_emit_data;
505 
506 /* Abbreviation for frequent emit data access.  */
507 static inline rtx
508 allocno_emit_reg (ira_allocno_t a)
509 {
510   return ALLOCNO_EMIT_DATA (a)->reg;
511 }
512 
513 #define OBJECT_ALLOCNO(O) ((O)->allocno)
514 #define OBJECT_SUBWORD(O) ((O)->subword)
515 #define OBJECT_CONFLICT_ARRAY(O) ((O)->conflicts_array)
516 #define OBJECT_CONFLICT_VEC(O) ((ira_object_t *)(O)->conflicts_array)
517 #define OBJECT_CONFLICT_BITVEC(O) ((IRA_INT_TYPE *)(O)->conflicts_array)
518 #define OBJECT_CONFLICT_ARRAY_SIZE(O) ((O)->conflicts_array_size)
519 #define OBJECT_CONFLICT_VEC_P(O) ((O)->conflict_vec_p)
520 #define OBJECT_NUM_CONFLICTS(O) ((O)->num_accumulated_conflicts)
521 #define OBJECT_CONFLICT_HARD_REGS(O) ((O)->conflict_hard_regs)
522 #define OBJECT_TOTAL_CONFLICT_HARD_REGS(O) ((O)->total_conflict_hard_regs)
523 #define OBJECT_MIN(O) ((O)->min)
524 #define OBJECT_MAX(O) ((O)->max)
525 #define OBJECT_CONFLICT_ID(O) ((O)->id)
526 #define OBJECT_LIVE_RANGES(O) ((O)->live_ranges)
527 
528 /* Map regno -> allocnos with given regno (see comments for
529    allocno member `next_regno_allocno').  */
530 extern ira_allocno_t *ira_regno_allocno_map;
531 
532 /* Array of references to all allocnos.  The order number of the
533    allocno corresponds to the index in the array.  Removed allocnos
534    have NULL element value.  */
535 extern ira_allocno_t *ira_allocnos;
536 
537 /* The size of the previous array.  */
538 extern int ira_allocnos_num;
539 
540 /* Map a conflict id to its corresponding ira_object structure.  */
541 extern ira_object_t *ira_object_id_map;
542 
543 /* The size of the previous array.  */
544 extern int ira_objects_num;
545 
546 /* The following structure represents a hard register preference of
547    an allocno.  The preference represents move insns or potential move
548    insns, usually arising from two-operand insn constraints.  One move
549    operand is a hard register.  */
550 struct ira_allocno_pref
551 {
552   /* The unique order number of the preference node starting with 0.  */
553   int num;
554   /* Preferred hard register.  */
555   int hard_regno;
556   /* Accumulated execution frequency of insns from which the
557      preference was created.  */
558   int freq;
559   /* Given allocno.  */
560   ira_allocno_t allocno;
561   /* All preferences with the same allocno are linked by the following
562      member.  */
563   ira_pref_t next_pref;
564 };
565 
566 /* Array of references to all allocno preferences.  The order number
567    of the preference corresponds to the index in the array.  */
568 extern ira_pref_t *ira_prefs;
569 
570 /* Size of the previous array.  */
571 extern int ira_prefs_num;
572 
573 /* The following structure represents a copy of two allocnos.  The
574    copies represent move insns or potential move insns, usually arising
575    from two-operand insn constraints.  To remove register shuffling, we
576    also create copies between an allocno which is the output of an insn
577    and an allocno becoming dead in the insn.  */
578 struct ira_allocno_copy
579 {
580   /* The unique order number of the copy node starting with 0.  */
581   int num;
582   /* Allocnos connected by the copy.  The first allocno should have
583      smaller order number than the second one.  */
584   ira_allocno_t first, second;
585   /* Execution frequency of the copy.  */
586   int freq;
587   bool constraint_p;
588   /* It is the move insn which is the origin of the copy.  The member
589      value is NULL for a copy representing two-operand insn constraints
590      or for a copy created to remove register shuffling.  In the latter
591      case the copy frequency is smaller than the corresponding insn
592      execution frequency.  */
593   rtx_insn *insn;
594   /* All copies with the same allocno as FIRST are linked by the two
595      following members.  */
596   ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
597   /* All copies with the same allocno as SECOND are linked by the two
598      following members.  */
599   ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
600   /* Region from which given copy is originated.  */
601   ira_loop_tree_node_t loop_tree_node;
602 };
603 
604 /* Array of references to all copies.  The order number of the copy
605    corresponds to the index in the array.  Removed copies have NULL
606    element value.  */
607 extern ira_copy_t *ira_copies;
608 
609 /* Size of the previous array.  */
610 extern int ira_copies_num;
611 
612 /* The following structure describes a stack slot used for spilled
613    pseudo-registers.  */
614 class ira_spilled_reg_stack_slot
615 {
616 public:
617   /* Pseudo-registers assigned to the stack slot.  */
618   bitmap_head spilled_regs;
619   /* RTL representation of the stack slot.  */
620   rtx mem;
621   /* Size of the stack slot.  */
622   poly_uint64_pod width;
623 };
624 
625 /* The number of elements in the following array.  */
626 extern int ira_spilled_reg_stack_slots_num;
627 
628 /* The following array contains info about spilled pseudo-registers
629    stack slots used in current function so far.  */
630 extern class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
631 
632 /* Correspondingly, the overall cost of the allocation, the cost of the
633    allocnos assigned to hard-registers, cost of the allocnos assigned
634    to memory, cost of loads, stores and register move insns generated
635    for pseudo-register live range splitting (see ira-emit.cc).  */
636 extern int64_t ira_overall_cost;
637 extern int64_t ira_reg_cost, ira_mem_cost;
638 extern int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
639 extern int ira_move_loops_num, ira_additional_jumps_num;
640 
641 
642 /* This page contains a bitset implementation called 'min/max sets' used to
643    record conflicts in IRA.
644    They are named min/max sets since we keep track of a minimum and a maximum
645    bit number for each set representing the bounds of valid elements.  Otherwise,
646    the implementation resembles sbitmaps in that we store an array of integers
647    whose bits directly represent the members of the set.  */
648 
649 /* The type used as elements in the array, and the number of bits in
650    this type.  */
651 
652 #define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
653 #define IRA_INT_TYPE HOST_WIDE_INT
654 
655 /* Set, clear or test bit number I in R, a bit vector of elements with
656    minimal index and maximal index equal correspondingly to MIN and
657    MAX.  */
658 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
659 
660 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__	        \
661   (({ int _min = (MIN), _max = (MAX), _i = (I);				\
662      if (_i < _min || _i > _max)					\
663        {								\
664          fprintf (stderr,						\
665                   "\n%s: %d: error in %s: %d not in range [%d,%d]\n",   \
666                   __FILE__, __LINE__, __FUNCTION__, _i, _min, _max);	\
667          gcc_unreachable ();						\
668        }								\
669      ((R)[(unsigned) (_i - _min) / IRA_INT_BITS]			\
670       |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
671 
672 
673 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__	        \
674   (({ int _min = (MIN), _max = (MAX), _i = (I);				\
675      if (_i < _min || _i > _max)					\
676        {								\
677          fprintf (stderr,						\
678                   "\n%s: %d: error in %s: %d not in range [%d,%d]\n",   \
679                   __FILE__, __LINE__, __FUNCTION__, _i, _min, _max);	\
680          gcc_unreachable ();						\
681        }								\
682      ((R)[(unsigned) (_i - _min) / IRA_INT_BITS]			\
683       &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
684 
685 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__	        \
686   (({ int _min = (MIN), _max = (MAX), _i = (I);				\
687      if (_i < _min || _i > _max)					\
688        {								\
689          fprintf (stderr,						\
690                   "\n%s: %d: error in %s: %d not in range [%d,%d]\n",   \
691                   __FILE__, __LINE__, __FUNCTION__, _i, _min, _max);	\
692          gcc_unreachable ();						\
693        }								\
694      ((R)[(unsigned) (_i - _min) / IRA_INT_BITS]			\
695       & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
696 
697 #else
698 
699 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX)			\
700   ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS]			\
701    |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
702 
703 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX)			\
704   ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS]			\
705    &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
706 
707 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX)			\
708   ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS]			\
709    & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
710 
711 #endif
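
/* Illustrative sketch only (not part of the IRA API): testing membership in
   an object's conflict representation with the min/max set macros above.
   The bit vector is indexed by conflict ids relative to OBJECT_MIN, as
   described in the ira_object comments.  */
static inline bool
ira_sketch_objects_conflict_p (ira_object_t obj, ira_object_t other)
{
  int id = OBJECT_CONFLICT_ID (other);

  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      /* Vector representation: NULL-terminated array of objects.  */
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj);

      for (int k = 0; vec[k] != NULL; k++)
	if (vec[k] == other)
	  return true;
      return false;
    }
  /* Bit-vector representation: only ids in [OBJECT_MIN, OBJECT_MAX]
     can be set.  */
  if (id < OBJECT_MIN (obj) || id > OBJECT_MAX (obj))
    return false;
  return TEST_MINMAX_SET_BIT (OBJECT_CONFLICT_BITVEC (obj), id,
			      OBJECT_MIN (obj), OBJECT_MAX (obj)) != 0;
}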
712 
713 /* The iterator for min/max sets.  */
714 struct minmax_set_iterator {
715 
716   /* Array containing the bit vector.  */
717   IRA_INT_TYPE *vec;
718 
719   /* The number of the current element in the vector.  */
720   unsigned int word_num;
721 
722   /* The number of bits in the bit vector.  */
723   unsigned int nel;
724 
725   /* The current bit index of the bit vector.  */
726   unsigned int bit_num;
727 
728   /* Index corresponding to the 1st bit of the bit vector.   */
729   int start_val;
730 
731   /* The word of the bit vector currently visited.  */
732   unsigned IRA_INT_TYPE word;
733 };
734 
735 /* Initialize the iterator I for bit vector VEC containing minimal and
736    maximal values MIN and MAX.  */
737 static inline void
738 minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
739 		      int max)
740 {
741   i->vec = vec;
742   i->word_num = 0;
743   i->nel = max < min ? 0 : max - min + 1;
744   i->start_val = min;
745   i->bit_num = 0;
746   i->word = i->nel == 0 ? 0 : vec[0];
747 }
748 
749 /* Return TRUE if we have more elements to visit, in which case *N is
750    set to the number of the element to be visited.  Otherwise, return
751    FALSE.  */
752 static inline bool
753 minmax_set_iter_cond (minmax_set_iterator *i, int *n)
754 {
755   /* Skip words that are zeros.  */
756   for (; i->word == 0; i->word = i->vec[i->word_num])
757     {
758       i->word_num++;
759       i->bit_num = i->word_num * IRA_INT_BITS;
760 
761       /* If we have reached the end, break.  */
762       if (i->bit_num >= i->nel)
763 	return false;
764     }
765 
766   /* Skip bits that are zero.  */
767   int off = ctz_hwi (i->word);
768   i->bit_num += off;
769   i->word >>= off;
770 
771   *n = (int) i->bit_num + i->start_val;
772 
773   return true;
774 }
775 
776 /* Advance to the next element in the set.  */
777 static inline void
778 minmax_set_iter_next (minmax_set_iterator *i)
779 {
780   i->word >>= 1;
781   i->bit_num++;
782 }
783 
784 /* Loop over all elements of a min/max set given by bit vector VEC and
785    their minimal and maximal values MIN and MAX.  In each iteration, N
786    is set to the number of the next element.  ITER is an instance of
787    minmax_set_iterator used to iterate over the set.  */
788 #define FOR_EACH_BIT_IN_MINMAX_SET(VEC, MIN, MAX, N, ITER)	\
789   for (minmax_set_iter_init (&(ITER), (VEC), (MIN), (MAX));	\
790        minmax_set_iter_cond (&(ITER), &(N));			\
791        minmax_set_iter_next (&(ITER)))
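
/* Usage sketch (illustrative only, not part of the IRA API): iterating over
   the conflict bit vector of OBJ with the iterator defined above, printing
   the conflict ids to F.  Objects using the vector representation are
   skipped here.  */
static inline void
ira_sketch_print_conflict_ids (FILE *f, ira_object_t obj)
{
  minmax_set_iterator iter;
  int id;

  if (OBJECT_CONFLICT_VEC_P (obj))
    return;
  FOR_EACH_BIT_IN_MINMAX_SET (OBJECT_CONFLICT_BITVEC (obj),
			      OBJECT_MIN (obj), OBJECT_MAX (obj), id, iter)
    fprintf (f, " %d", id);
}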
792 
793 class target_ira_int {
794 public:
795   ~target_ira_int ();
796 
797   void free_ira_costs ();
798   void free_register_move_costs ();
799 
800   /* Initialized once.  It is the maximal possible size of the allocated
801      struct costs.  */
802   size_t x_max_struct_costs_size;
803 
804   /* Allocated and initialized once, and used to initialize cost values
805      for each insn.  */
806   struct costs *x_init_cost;
807 
808   /* Allocated once, and used for temporary purposes.  */
809   struct costs *x_temp_costs;
810 
811   /* Allocated once, and used for the cost calculation.  */
812   struct costs *x_op_costs[MAX_RECOG_OPERANDS];
813   struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];
814 
815   /* Hard registers that cannot be used for the register allocator for
816      all functions of the current compilation unit.  */
817   HARD_REG_SET x_no_unit_alloc_regs;
818 
819   /* Map: hard regs X modes -> set of hard registers for storing value
820      of given mode starting with given hard register.  */
821   HARD_REG_SET (x_ira_reg_mode_hard_regset
822 		[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES]);
823 
824   /* Maximum cost of moving from a register in one class to a register
825      in another class.  Based on TARGET_REGISTER_MOVE_COST.  */
826   move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];
827 
828   /* Similar, but here we don't have to move if the first index is a
829      subset of the second so in that case the cost is zero.  */
830   move_table *x_ira_may_move_in_cost[MAX_MACHINE_MODE];
831 
832   /* Similar, but here we don't have to move if the first index is a
833      superset of the second so in that case the cost is zero.  */
834   move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];
835 
836   /* Keep track of the last mode we initialized move costs for.  */
837   int x_last_mode_for_init_move_cost;
838 
839   /* Array analog of the macro MEMORY_MOVE_COST, but containing the
840      maximal cost, not the minimal one.  */
841   short int x_ira_max_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
842 
843   /* Map class->true if class is a possible allocno class, false
844      otherwise. */
845   bool x_ira_reg_allocno_class_p[N_REG_CLASSES];
846 
847   /* Map class->true if class is a pressure class, false otherwise. */
848   bool x_ira_reg_pressure_class_p[N_REG_CLASSES];
849 
850   /* Array of the number of hard registers of given class which are
851      available for allocation.  The order is defined by the hard
852      register numbers.  */
853   short x_ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
854 
855   /* Index (in ira_class_hard_regs) for given register class and hard
856      register (in the general case a hard register can belong to several
857      register classes).  The index is negative for hard registers
858      unavailable for the allocation.  */
859   short x_ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
860 
861   /* Index [CL][M] contains R if R appears somewhere in a register of the form:
862 
863          (reg:M R'), R' not in x_ira_prohibited_class_mode_regs[CL][M]
864 
865      For example, if:
866 
867      - (reg:M 2) is valid and occupies two registers;
868      - register 2 belongs to CL; and
869      - register 3 belongs to the same pressure class as CL
870 
871      then (reg:M 2) contributes to [CL][M] and registers 2 and 3 will be
872      in the set.  */
873   HARD_REG_SET x_ira_useful_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
874 
875   /* The value is the number of elements in the subsequent array.  */
876   int x_ira_important_classes_num;
877 
878   /* The array containing all non-empty classes.  Such classes are
879      important for calculating the hard register usage costs.  */
880   enum reg_class x_ira_important_classes[N_REG_CLASSES];
881 
882   /* The array containing indexes of important classes in the previous
883      array.  The array elements are defined only for important
884      classes.  */
885   int x_ira_important_class_nums[N_REG_CLASSES];
886 
887   /* Map class->true if class is a uniform class, false otherwise.  */
888   bool x_ira_uniform_class_p[N_REG_CLASSES];
889 
890   /* The biggest important class inside the intersection of the two
891      classes (that is calculated taking only hard registers available
892      for allocation into account).  If both classes contain no hard
893      registers available for allocation, the value is calculated
894      taking all hard registers, including fixed ones, into account.  */
895   enum reg_class x_ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
896 
897   /* Classes with end marker LIM_REG_CLASSES which are intersected with
898      given class (the first index).  That includes given class itself.
899      This is calculated taking only hard registers available for
900      allocation into account.  */
901   enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
902 
903   /* The biggest (smallest) important class inside of (covering) union
904      of the two classes (that is calculated taking only hard registers
905      available for allocation into account).  If both classes
906      contain no hard registers available for allocation, the value is
907      calculated taking all hard registers, including fixed ones,
908      into account.  In other words, the value is the corresponding
909      reg_class_subunion (reg_class_superunion) value.  */
910   enum reg_class x_ira_reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
911   enum reg_class x_ira_reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
912 
913   /* For each reg class, table listing all the classes contained in it
914      (excluding the class itself; non-allocatable registers are
915      excluded from consideration).  */
916   enum reg_class x_alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
917 
918   /* Array whose elements are sets of hard registers for which a
919      move of the hard register in the given mode into itself is
920      prohibited.  */
921   HARD_REG_SET x_ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
922 
923   /* Flag indicating that the above array has been initialized.  */
924   bool x_ira_prohibited_mode_move_regs_initialized_p;
925 };
926 
927 extern class target_ira_int default_target_ira_int;
928 #if SWITCHABLE_TARGET
929 extern class target_ira_int *this_target_ira_int;
930 #else
931 #define this_target_ira_int (&default_target_ira_int)
932 #endif
933 
934 #define ira_reg_mode_hard_regset \
935   (this_target_ira_int->x_ira_reg_mode_hard_regset)
936 #define ira_register_move_cost \
937   (this_target_ira_int->x_ira_register_move_cost)
938 #define ira_max_memory_move_cost \
939   (this_target_ira_int->x_ira_max_memory_move_cost)
940 #define ira_may_move_in_cost \
941   (this_target_ira_int->x_ira_may_move_in_cost)
942 #define ira_may_move_out_cost \
943   (this_target_ira_int->x_ira_may_move_out_cost)
944 #define ira_reg_allocno_class_p \
945   (this_target_ira_int->x_ira_reg_allocno_class_p)
946 #define ira_reg_pressure_class_p \
947   (this_target_ira_int->x_ira_reg_pressure_class_p)
948 #define ira_non_ordered_class_hard_regs \
949   (this_target_ira_int->x_ira_non_ordered_class_hard_regs)
950 #define ira_class_hard_reg_index \
951   (this_target_ira_int->x_ira_class_hard_reg_index)
952 #define ira_useful_class_mode_regs \
953   (this_target_ira_int->x_ira_useful_class_mode_regs)
954 #define ira_important_classes_num \
955   (this_target_ira_int->x_ira_important_classes_num)
956 #define ira_important_classes \
957   (this_target_ira_int->x_ira_important_classes)
958 #define ira_important_class_nums \
959   (this_target_ira_int->x_ira_important_class_nums)
960 #define ira_uniform_class_p \
961   (this_target_ira_int->x_ira_uniform_class_p)
962 #define ira_reg_class_intersect \
963   (this_target_ira_int->x_ira_reg_class_intersect)
964 #define ira_reg_class_super_classes \
965   (this_target_ira_int->x_ira_reg_class_super_classes)
966 #define ira_reg_class_subunion \
967   (this_target_ira_int->x_ira_reg_class_subunion)
968 #define ira_reg_class_superunion \
969   (this_target_ira_int->x_ira_reg_class_superunion)
970 #define ira_prohibited_mode_move_regs \
971   (this_target_ira_int->x_ira_prohibited_mode_move_regs)
972 
973 /* ira.cc: */
974 
975 extern void *ira_allocate (size_t);
976 extern void ira_free (void *addr);
977 extern bitmap ira_allocate_bitmap (void);
978 extern void ira_free_bitmap (bitmap);
979 extern void ira_print_disposition (FILE *);
980 extern void ira_debug_disposition (void);
981 extern void ira_debug_allocno_classes (void);
982 extern void ira_init_register_move_cost (machine_mode);
983 extern alternative_mask ira_setup_alts (rtx_insn *);
984 extern int ira_get_dup_out_num (int, alternative_mask, bool &);
985 
986 /* ira-build.cc */
987 
988 /* The current loop tree node and its regno allocno map.  */
989 extern ira_loop_tree_node_t ira_curr_loop_tree_node;
990 extern ira_allocno_t *ira_curr_regno_allocno_map;
991 
992 extern void ira_debug_pref (ira_pref_t);
993 extern void ira_debug_prefs (void);
994 extern void ira_debug_allocno_prefs (ira_allocno_t);
995 
996 extern void ira_debug_copy (ira_copy_t);
997 extern void debug (ira_allocno_copy &ref);
998 extern void debug (ira_allocno_copy *ptr);
999 
1000 extern void ira_debug_copies (void);
1001 extern void ira_debug_allocno_copies (ira_allocno_t);
1002 extern void debug (ira_allocno &ref);
1003 extern void debug (ira_allocno *ptr);
1004 
1005 extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
1006 				    void (*) (ira_loop_tree_node_t),
1007 				    void (*) (ira_loop_tree_node_t));
1008 extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
1009 extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
1010 extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
1011 extern void ira_create_allocno_objects (ira_allocno_t);
1012 extern void ira_set_allocno_class (ira_allocno_t, enum reg_class);
1013 extern bool ira_conflict_vector_profitable_p (ira_object_t, int);
1014 extern void ira_allocate_conflict_vec (ira_object_t, int);
1015 extern void ira_allocate_object_conflicts (ira_object_t, int);
1016 extern void ior_hard_reg_conflicts (ira_allocno_t, const_hard_reg_set);
1017 extern void ira_print_expanded_allocno (ira_allocno_t);
1018 extern void ira_add_live_range_to_object (ira_object_t, int, int);
1019 extern live_range_t ira_create_live_range (ira_object_t, int, int,
1020 					   live_range_t);
1021 extern live_range_t ira_copy_live_range_list (live_range_t);
1022 extern live_range_t ira_merge_live_ranges (live_range_t, live_range_t);
1023 extern bool ira_live_ranges_intersect_p (live_range_t, live_range_t);
1024 extern void ira_finish_live_range (live_range_t);
1025 extern void ira_finish_live_range_list (live_range_t);
1026 extern void ira_free_allocno_updated_costs (ira_allocno_t);
1027 extern ira_pref_t ira_create_pref (ira_allocno_t, int, int);
1028 extern void ira_add_allocno_pref (ira_allocno_t, int, int);
1029 extern void ira_remove_pref (ira_pref_t);
1030 extern void ira_remove_allocno_prefs (ira_allocno_t);
1031 extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
1032 				   int, bool, rtx_insn *,
1033 				   ira_loop_tree_node_t);
1034 extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int,
1035 					bool, rtx_insn *,
1036 					ira_loop_tree_node_t);
1037 
1038 extern int *ira_allocate_cost_vector (reg_class_t);
1039 extern void ira_free_cost_vector (int *, reg_class_t);
1040 
1041 extern void ira_flattening (int, int);
1042 extern bool ira_build (void);
1043 extern void ira_destroy (void);
1044 
1045 /* ira-costs.cc */
1046 extern void ira_init_costs_once (void);
1047 extern void ira_init_costs (void);
1048 extern void ira_costs (void);
1049 extern void ira_tune_allocno_costs (void);
1050 
1051 /* ira-lives.cc */
1052 
1053 extern void ira_rebuild_start_finish_chains (void);
1054 extern void ira_print_live_range_list (FILE *, live_range_t);
1055 extern void debug (live_range &ref);
1056 extern void debug (live_range *ptr);
1057 extern void ira_debug_live_range_list (live_range_t);
1058 extern void ira_debug_allocno_live_ranges (ira_allocno_t);
1059 extern void ira_debug_live_ranges (void);
1060 extern void ira_create_allocno_live_ranges (void);
1061 extern void ira_compress_allocno_live_ranges (void);
1062 extern void ira_finish_allocno_live_ranges (void);
1063 extern void ira_implicitly_set_insn_hard_regs (HARD_REG_SET *,
1064 					       alternative_mask);
1065 
1066 /* ira-conflicts.cc */
1067 extern void ira_debug_conflicts (bool);
1068 extern void ira_build_conflicts (void);
1069 
1070 /* ira-color.cc */
1071 extern ira_allocno_t ira_soft_conflict (ira_allocno_t, ira_allocno_t);
1072 extern void ira_debug_hard_regs_forest (void);
1073 extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
1074 extern void ira_reassign_conflict_allocnos (int);
1075 extern void ira_initiate_assign (void);
1076 extern void ira_finish_assign (void);
1077 extern void ira_color (void);
1078 
1079 /* ira-emit.cc */
1080 extern void ira_initiate_emit_data (void);
1081 extern void ira_finish_emit_data (void);
1082 extern void ira_emit (bool);
1083 
1084 
1085 
1086 /* Return true if the equivalence of pseudo REGNO is not an lvalue.  */
1087 static inline bool
1088 ira_equiv_no_lvalue_p (int regno)
1089 {
1090   if (regno >= ira_reg_equiv_len)
1091     return false;
1092   return (ira_reg_equiv[regno].constant != NULL_RTX
1093 	  || ira_reg_equiv[regno].invariant != NULL_RTX
1094 	  || (ira_reg_equiv[regno].memory != NULL_RTX
1095 	      && MEM_READONLY_P (ira_reg_equiv[regno].memory)));
1096 }
1097 
1098 
1099 
1100 /* Initialize register costs for MODE if necessary.  */
1101 static inline void
1102 ira_init_register_move_cost_if_necessary (machine_mode mode)
1103 {
1104   if (ira_register_move_cost[mode] == NULL)
1105     ira_init_register_move_cost (mode);
1106 }
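
/* Usage sketch (illustrative only, not part of the IRA API): looking up the
   cost of moving a value of MODE between two register classes, making sure
   the per-mode table has been initialized first.  */
static inline int
ira_sketch_register_move_cost (machine_mode mode, enum reg_class from,
			       enum reg_class to)
{
  ira_init_register_move_cost_if_necessary (mode);
  return ira_register_move_cost[mode][from][to];
}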
1107 
1108 
1109 
1110 /* The iterator for all allocnos.  */
1111 struct ira_allocno_iterator {
1112   /* The number of the current element in IRA_ALLOCNOS.  */
1113   int n;
1114 };
1115 
1116 /* Initialize the iterator I.  */
1117 static inline void
1118 ira_allocno_iter_init (ira_allocno_iterator *i)
1119 {
1120   i->n = 0;
1121 }
1122 
1123 /* Return TRUE if we have more allocnos to visit, in which case *A is
1124    set to the allocno to be visited.  Otherwise, return FALSE.  */
1125 static inline bool
1126 ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
1127 {
1128   int n;
1129 
1130   for (n = i->n; n < ira_allocnos_num; n++)
1131     if (ira_allocnos[n] != NULL)
1132       {
1133 	*a = ira_allocnos[n];
1134 	i->n = n + 1;
1135 	return true;
1136       }
1137   return false;
1138 }
1139 
1140 /* Loop over all allocnos.  In each iteration, A is set to the next
1141    allocno.  ITER is an instance of ira_allocno_iterator used to iterate
1142    the allocnos.  */
1143 #define FOR_EACH_ALLOCNO(A, ITER)			\
1144   for (ira_allocno_iter_init (&(ITER));			\
1145        ira_allocno_iter_cond (&(ITER), &(A));)
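
/* Usage sketch (illustrative only, not part of the IRA API): how
   FOR_EACH_ALLOCNO is typically written at call sites, here counting the
   allocnos that ended up in hard registers.  */
static inline int
ira_sketch_count_assigned_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int n = 0;

  FOR_EACH_ALLOCNO (a, ai)
    if (ALLOCNO_ASSIGNED_P (a) && ALLOCNO_HARD_REGNO (a) >= 0)
      n++;
  return n;
}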
1146 
1147 /* The iterator for all objects.  */
1148 struct ira_object_iterator {
1149   /* The number of the current element in ira_object_id_map.  */
1150   int n;
1151 };
1152 
1153 /* Initialize the iterator I.  */
1154 static inline void
1155 ira_object_iter_init (ira_object_iterator *i)
1156 {
1157   i->n = 0;
1158 }
1159 
1160 /* Return TRUE if we have more objects to visit, in which case *OBJ is
1161    set to the object to be visited.  Otherwise, return FALSE.  */
1162 static inline bool
1163 ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
1164 {
1165   int n;
1166 
1167   for (n = i->n; n < ira_objects_num; n++)
1168     if (ira_object_id_map[n] != NULL)
1169       {
1170 	*obj = ira_object_id_map[n];
1171 	i->n = n + 1;
1172 	return true;
1173       }
1174   return false;
1175 }
1176 
1177 /* Loop over all objects.  In each iteration, OBJ is set to the next
1178    object.  ITER is an instance of ira_object_iterator used to iterate
1179    the objects.  */
1180 #define FOR_EACH_OBJECT(OBJ, ITER)			\
1181   for (ira_object_iter_init (&(ITER));			\
1182        ira_object_iter_cond (&(ITER), &(OBJ));)
1183 
1184 /* The iterator for objects associated with an allocno.  */
1185 struct ira_allocno_object_iterator {
1186   /* The number of the element in the allocno's object array.  */
1187   int n;
1188 };
1189 
1190 /* Initialize the iterator I.  */
1191 static inline void
1192 ira_allocno_object_iter_init (ira_allocno_object_iterator *i)
1193 {
1194   i->n = 0;
1195 }
1196 
1197 /* Return TRUE if we have more objects to visit in allocno A, in which
1198    case *O is set to the object to be visited.  Otherwise, return
1199    FALSE.  */
1200 static inline bool
1201 ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
1202 			      ira_object_t *o)
1203 {
1204   int n = i->n++;
1205   if (n < ALLOCNO_NUM_OBJECTS (a))
1206     {
1207       *o = ALLOCNO_OBJECT (a, n);
1208       return true;
1209     }
1210   return false;
1211 }
1212 
1213 /* Loop over all objects associated with allocno A.  In each
1214    iteration, O is set to the next object.  ITER is an instance of
1215    ira_allocno_object_iterator used to iterate the conflicts.  */
1216 #define FOR_EACH_ALLOCNO_OBJECT(A, O, ITER)			\
1217   for (ira_allocno_object_iter_init (&(ITER));			\
1218        ira_allocno_object_iter_cond (&(ITER), (A), &(O));)
1219 
1220 
1221 /* The iterator for prefs.  */
1222 struct ira_pref_iterator {
1223   /* The number of the current element in IRA_PREFS.  */
1224   int n;
1225 };
1226 
1227 /* Initialize the iterator I.  */
1228 static inline void
1229 ira_pref_iter_init (ira_pref_iterator *i)
1230 {
1231   i->n = 0;
1232 }
1233 
1234 /* Return TRUE if we have more prefs to visit, in which case *PREF is
1235    set to the pref to be visited.  Otherwise, return FALSE.  */
1236 static inline bool
1237 ira_pref_iter_cond (ira_pref_iterator *i, ira_pref_t *pref)
1238 {
1239   int n;
1240 
1241   for (n = i->n; n < ira_prefs_num; n++)
1242     if (ira_prefs[n] != NULL)
1243       {
1244 	*pref = ira_prefs[n];
1245 	i->n = n + 1;
1246 	return true;
1247       }
1248   return false;
1249 }
1250 
1251 /* Loop over all prefs.  In each iteration, P is set to the next
1252    pref.  ITER is an instance of ira_pref_iterator used to iterate
1253    the prefs.  */
1254 #define FOR_EACH_PREF(P, ITER)				\
1255   for (ira_pref_iter_init (&(ITER));			\
1256        ira_pref_iter_cond (&(ITER), &(P));)
1257 
1258 
1259 /* The iterator for copies.  */
1260 struct ira_copy_iterator {
1261   /* The number of the current element in IRA_COPIES.  */
1262   int n;
1263 };
1264 
1265 /* Initialize the iterator I.  */
1266 static inline void
ira_copy_iter_init(ira_copy_iterator * i)1267 ira_copy_iter_init (ira_copy_iterator *i)
1268 {
1269   i->n = 0;
1270 }
1271 
1272 /* Return TRUE if we have more copies to visit, in which case *CP is
1273    set to the copy to be visited.  Otherwise, return FALSE.  */
1274 static inline bool
1275 ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
1276 {
1277   int n;
1278 
1279   for (n = i->n; n < ira_copies_num; n++)
1280     if (ira_copies[n] != NULL)
1281       {
1282 	*cp = ira_copies[n];
1283 	i->n = n + 1;
1284 	return true;
1285       }
1286   return false;
1287 }
1288 
1289 /* Loop over all copies.  In each iteration, C is set to the next
1290    copy.  ITER is an instance of ira_copy_iterator used to iterate
1291    the copies.  */
1292 #define FOR_EACH_COPY(C, ITER)				\
1293   for (ira_copy_iter_init (&(ITER));			\
1294        ira_copy_iter_cond (&(ITER), &(C));)
1295 
1296 /* The iterator for object conflicts.  */
1297 struct ira_object_conflict_iterator {
1298 
1299   /* TRUE if the conflicts are represented by a vector of objects.  */
1300   bool conflict_vec_p;
1301 
1302   /* The conflict vector or conflict bit vector.  */
1303   void *vec;
1304 
1305   /* The number of the current element in the vector (of type
1306      ira_object_t or IRA_INT_TYPE).  */
1307   unsigned int word_num;
1308 
1309   /* The bit vector size.  It is defined only if
1310      OBJECT_CONFLICT_VEC_P is FALSE.  */
1311   unsigned int size;
1312 
1313   /* The current bit index of bit vector.  It is defined only if
1314      OBJECT_CONFLICT_VEC_P is FALSE.  */
1315   unsigned int bit_num;
1316 
1317   /* The object id corresponding to the 1st bit of the bit vector.  It
1318      is defined only if OBJECT_CONFLICT_VEC_P is FALSE.  */
1319   int base_conflict_id;
1320 
1321   /* The word of bit vector currently visited.  It is defined only if
1322      OBJECT_CONFLICT_VEC_P is FALSE.  */
1323   unsigned IRA_INT_TYPE word;
1324 };
1325 
1326 /* Initialize the iterator I with the conflicts of object OBJ.  */
1327 static inline void
1328 ira_object_conflict_iter_init (ira_object_conflict_iterator *i,
1329 			       ira_object_t obj)
1330 {
1331   i->conflict_vec_p = OBJECT_CONFLICT_VEC_P (obj);
1332   i->vec = OBJECT_CONFLICT_ARRAY (obj);
1333   i->word_num = 0;
1334   if (i->conflict_vec_p)
1335     i->size = i->bit_num = i->base_conflict_id = i->word = 0;
1336   else
1337     {
1338       if (OBJECT_MIN (obj) > OBJECT_MAX (obj))
1339 	i->size = 0;
1340       else
1341 	i->size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj)
1342 		    + IRA_INT_BITS)
1343 		   / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
1344       i->bit_num = 0;
1345       i->base_conflict_id = OBJECT_MIN (obj);
1346       i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
1347     }
1348 }
1349 
1350 /* Return TRUE if we have more conflicting objects to visit, in which
1351    case *POBJ is set to the object to be visited.  Otherwise, return
1352    FALSE.  */
1353 static inline bool
1354 ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
1355 			       ira_object_t *pobj)
1356 {
1357   ira_object_t obj;
1358 
1359   if (i->conflict_vec_p)
1360     {
1361       obj = ((ira_object_t *) i->vec)[i->word_num++];
1362       if (obj == NULL)
1363 	return false;
1364     }
1365   else
1366     {
1367       unsigned IRA_INT_TYPE word = i->word;
1368       unsigned int bit_num = i->bit_num;
1369 
1370       /* Skip words that are zeros.  */
1371       for (; word == 0; word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
1372 	{
1373 	  i->word_num++;
1374 
1375 	  /* If we have reached the end, stop.  */
1376 	  if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
1377 	    return false;
1378 
1379 	  bit_num = i->word_num * IRA_INT_BITS;
1380 	}
1381 
1382       /* Skip bits that are zero.  */
1383       int off = ctz_hwi (word);
1384       bit_num += off;
1385       word >>= off;
1386 
1387       obj = ira_object_id_map[bit_num + i->base_conflict_id];
1388       i->bit_num = bit_num + 1;
1389       i->word = word >> 1;
1390     }
1391 
1392   *pobj = obj;
1393   return true;
1394 }
1395 
1396 /* Loop over all objects conflicting with OBJ.  In each iteration,
1397    CONF is set to the next conflicting object.  ITER is an instance
1398    of ira_object_conflict_iterator used to iterate the conflicts.  */
1399 #define FOR_EACH_OBJECT_CONFLICT(OBJ, CONF, ITER)			\
1400   for (ira_object_conflict_iter_init (&(ITER), (OBJ));			\
1401        ira_object_conflict_iter_cond (&(ITER), &(CONF));)
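/* Usage sketch (illustrative only): record the hard registers already
   assigned to allocnos whose objects conflict with OBJ, using the
   OBJECT_ALLOCNO and ALLOCNO_HARD_REGNO accessors defined earlier in
   this file.  USED is assumed to be a caller-provided HARD_REG_SET.

     ira_object_t conf;
     ira_object_conflict_iterator oci;

     FOR_EACH_OBJECT_CONFLICT (obj, conf, oci)
       {
	 int hr = ALLOCNO_HARD_REGNO (OBJECT_ALLOCNO (conf));
	 if (hr >= 0)
	   SET_HARD_REG_BIT (used, hr);
       }
*/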
1402 
1403 
1404 
1405 /* Return TRUE if at least one of the hard registers starting at
1406    HARD_REGNO and needed to hold a value of MODE is in set
1407    HARD_REGSET.  */
1408 static inline bool
1409 ira_hard_reg_set_intersection_p (int hard_regno, machine_mode mode,
1410 				 HARD_REG_SET hard_regset)
1411 {
1412   int i;
1413 
1414   gcc_assert (hard_regno >= 0);
1415   for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
1416     if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1417       return true;
1418   return false;
1419 }
1420 
1421 /* Return the number of hard registers in hard register set SET.  */
1422 static inline int
1423 hard_reg_set_size (HARD_REG_SET set)
1424 {
1425   int i, size;
1426 
1427   for (size = i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1428     if (TEST_HARD_REG_BIT (set, i))
1429       size++;
1430   return size;
1431 }
1432 
1433 /* Return TRUE if all of the hard registers starting at
1434    HARD_REGNO and needed to hold a value of MODE are in set
1435    HARD_REGSET.  */
1436 static inline bool
1437 ira_hard_reg_in_set_p (int hard_regno, machine_mode mode,
1438 		       HARD_REG_SET hard_regset)
1439 {
1440   int i;
1441 
1442   ira_assert (hard_regno >= 0);
1443   for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
1444     if (!TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1445       return false;
1446   return true;
1447 }
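/* Illustration of the difference between the two predicates above
   (hypothetical target, not a definition): suppose a value of MODE
   needs two hard registers, 2 and 3, and HARD_REGSET contains only
   register 3.

     HARD_REG_SET set;
     CLEAR_HARD_REG_SET (set);
     SET_HARD_REG_BIT (set, 3);

   Then ira_hard_reg_set_intersection_p (2, mode, set) is true, because
   one of the two registers overlaps the set, while
   ira_hard_reg_in_set_p (2, mode, set) is false, because not all of
   them are contained in it.  */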
1448 
1449 
1450 
1451 /* To save memory we use a lazy approach for allocation and
1452    initialization of the cost vectors.  We do this only when it is
1453    really necessary.  */
1454 
1455 /* Allocate cost vector *VEC for hard registers of ACLASS and
1456    initialize its elements to VAL, but only if *VEC is still NULL.  */
1457 static inline void
1458 ira_allocate_and_set_costs (int **vec, reg_class_t aclass, int val)
1459 {
1460   int i, *reg_costs;
1461   int len;
1462 
1463   if (*vec != NULL)
1464     return;
1465   *vec = reg_costs = ira_allocate_cost_vector (aclass);
1466   len = ira_class_hard_regs_num[(int) aclass];
1467   for (i = 0; i < len; i++)
1468     reg_costs[i] = val;
1469 }
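/* Typical lazy-allocation pattern (a sketch, not a prescription): make
   sure allocno A's per-hard-register cost vector exists before
   adjusting one entry, seeding fresh entries with the class cost.
   This uses the ALLOCNO_HARD_REG_COSTS, ALLOCNO_CLASS and
   ALLOCNO_CLASS_COST accessors defined earlier in this file; I is an
   index into ira_class_hard_regs[ALLOCNO_CLASS (a)] and DELTA a cost
   adjustment chosen by the caller.

     ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a),
				 ALLOCNO_CLASS (a), ALLOCNO_CLASS_COST (a));
     ALLOCNO_HARD_REG_COSTS (a)[i] += delta;
*/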
1470 
1471 /* Allocate cost vector *VEC for hard registers of ACLASS and copy
1472    the values of non-null SRC into it, but only if *VEC is still NULL.  */
1473 static inline void
1474 ira_allocate_and_copy_costs (int **vec, enum reg_class aclass, int *src)
1475 {
1476   int len;
1477 
1478   if (*vec != NULL || src == NULL)
1479     return;
1480   *vec = ira_allocate_cost_vector (aclass);
1481   len = ira_class_hard_regs_num[aclass];
1482   memcpy (*vec, src, sizeof (int) * len);
1483 }
1484 
1485 /* If SRC is non-null, add its values to cost vector *VEC for hard
1486    registers of ACLASS, allocating and zeroing *VEC first if needed.  */
1487 static inline void
1488 ira_allocate_and_accumulate_costs (int **vec, enum reg_class aclass, int *src)
1489 {
1490   int i, len;
1491 
1492   if (src == NULL)
1493     return;
1494   len = ira_class_hard_regs_num[aclass];
1495   if (*vec == NULL)
1496     {
1497       *vec = ira_allocate_cost_vector (aclass);
1498       memset (*vec, 0, sizeof (int) * len);
1499     }
1500   for (i = 0; i < len; i++)
1501     (*vec)[i] += src[i];
1502 }
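/* Accumulation sketch (illustrative only): fold the hard register
   costs of a subloop allocno into its parent allocno A for the same
   pseudo, allocating and zeroing A's vector on first use.

     ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a),
					ALLOCNO_CLASS (a),
					ALLOCNO_HARD_REG_COSTS (subloop_a));
*/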
1503 
1504 /* Allocate cost vector *VEC for hard registers of ACLASS and copy
1505    values of vector SRC into the vector, or initialize it to VAL if
1506    SRC is null.  Do nothing if *VEC is already allocated.  */
1507 static inline void
1508 ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class aclass,
1509 				    int val, int *src)
1510 {
1511   int i, *reg_costs;
1512   int len;
1513 
1514   if (*vec != NULL)
1515     return;
1516   *vec = reg_costs = ira_allocate_cost_vector (aclass);
1517   len = ira_class_hard_regs_num[aclass];
1518   if (src != NULL)
1519     memcpy (reg_costs, src, sizeof (int) * len);
1520   else
1521     {
1522       for (i = 0; i < len; i++)
1523 	reg_costs[i] = val;
1524     }
1525 }
1526 
1527 extern rtx ira_create_new_reg (rtx);
1528 extern int first_moveable_pseudo, last_moveable_pseudo;
1529 
1530 /* Return the set of registers that would need a caller save if allocno A
1531    overlapped them.  */
1532 
1533 inline HARD_REG_SET
1534 ira_need_caller_save_regs (ira_allocno_t a)
1535 {
1536   return call_clobbers_in_region (ALLOCNO_CROSSED_CALLS_ABIS (a),
1537 				  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
1538 				  ALLOCNO_MODE (a));
1539 }
1540 
1541 /* Return true if we would need to save allocno A around a call if we
1542    assigned hard register REGNO.  */
1543 
1544 inline bool
1545 ira_need_caller_save_p (ira_allocno_t a, unsigned int regno)
1546 {
1547   if (ALLOCNO_CALLS_CROSSED_NUM (a) == 0)
1548     return false;
1549   return call_clobbered_in_region_p (ALLOCNO_CROSSED_CALLS_ABIS (a),
1550 				     ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
1551 				     ALLOCNO_MODE (a), regno);
1552 }
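/* Usage sketch (illustrative only): when costing hard register
   HARD_REGNO for allocno A, charge the save/restore overhead if some
   call in A's live range clobbers that register under one of the
   crossed ABIs.  ira_caller_save_cost is defined later in this file.

     if (ira_need_caller_save_p (a, hard_regno))
       cost += ira_caller_save_cost (a);
*/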
1553 
1554 /* Represents the boundary between an allocno in one loop and its parent
1555    allocno in the enclosing loop.  It is usually possible to change a
1556    register's allocation on this boundary; the class provides routines
1557    for calculating the cost of such changes.  */
1558 class ira_loop_border_costs
1559 {
1560 public:
1561   ira_loop_border_costs (ira_allocno_t);
1562 
1563   int move_between_loops_cost () const;
1564   int spill_outside_loop_cost () const;
1565   int spill_inside_loop_cost () const;
1566 
1567 private:
1568   /* The mode and class of the child allocno.  */
1569   machine_mode m_mode;
1570   reg_class m_class;
1571 
1572   /* The summed frequencies of the loop's entry edges and of its exit edges.  */
1573   int m_entry_freq, m_exit_freq;
1574 };
1575 
1576 /* Return the cost of storing the register on entry to the loop and
1577    loading it back on exit from the loop.  This is the cost to use if
1578    the register is spilled within the loop but is successfully allocated
1579    in the parent loop.  */
1580 inline int
1581 ira_loop_border_costs::spill_inside_loop_cost () const
1582 {
1583   return (m_entry_freq * ira_memory_move_cost[m_mode][m_class][0]
1584 	  + m_exit_freq * ira_memory_move_cost[m_mode][m_class][1]);
1585 }
1586 
1587 /* Return the cost of loading the register on entry to the loop and
1588    storing it back on exit from the loop.  This is the cost to use if
1589    the register is successfully allocated within the loop but is spilled
1590    in the parent loop.  */
1591 inline int
1592 ira_loop_border_costs::spill_outside_loop_cost () const
1593 {
1594   return (m_entry_freq * ira_memory_move_cost[m_mode][m_class][1]
1595 	  + m_exit_freq * ira_memory_move_cost[m_mode][m_class][0]);
1596 }
1597 
1598 /* Return the cost of moving the pseudo register between different hard
1599    registers on entry and exit from the loop.  This is the cost to use
1600    if the register is successfully allocated within both this loop and
1601    the parent loop, but the allocations for the loops differ.  */
1602 inline int
1603 ira_loop_border_costs::move_between_loops_cost () const
1604 {
1605   ira_init_register_move_cost_if_necessary (m_mode);
1606   auto move_cost = ira_register_move_cost[m_mode][m_class][m_class];
1607   return move_cost * (m_entry_freq + m_exit_freq);
1608 }
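/* Worked example with made-up numbers, using the reading of the cost
   tables above: let m_entry_freq be 100, m_exit_freq 50, the memory
   store and load costs for the class 4 and 6, and the register move
   cost 2.  Then

     spill_inside_loop_cost ()  = 100 * 4 + 50 * 6 = 700
     spill_outside_loop_cost () = 100 * 6 + 50 * 4 = 800
     move_between_loops_cost () = 2 * (100 + 50)   = 300

   so changing the assignment at the loop border is the cheapest of the
   three options in this case.  */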
1609 
1610 /* Return true if subloops that contain allocnos for A's register can
1611    use a different assignment from A.  ALLOCATED_P is true for the case
1612    in which allocation succeeded for A.  EXCLUDE_OLD_RELOAD is true if
1613    we should always return false for non-LRA targets.  (This is a hack
1614    and should be removed along with old reload.)  */
1615 inline bool
1616 ira_subloop_allocnos_can_differ_p (ira_allocno_t a, bool allocated_p = true,
1617 				   bool exclude_old_reload = true)
1618 {
1619   if (exclude_old_reload && !ira_use_lra_p)
1620     return false;
1621 
1622   auto regno = ALLOCNO_REGNO (a);
1623 
1624   if (pic_offset_table_rtx != NULL
1625       && regno == (int) REGNO (pic_offset_table_rtx))
1626     return false;
1627 
1628   ira_assert (regno < ira_reg_equiv_len);
1629   if (ira_equiv_no_lvalue_p (regno))
1630     return false;
1631 
1632   /* Avoid overlapping multi-registers.  Moves between them might result
1633      in wrong code generation.  */
1634   if (allocated_p)
1635     {
1636       auto pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
1637       if (ira_reg_class_max_nregs[pclass][ALLOCNO_MODE (a)] > 1)
1638 	return false;
1639     }
1640 
1641   return true;
1642 }
1643 
1644 /* Return true if we should treat A and SUBLOOP_A as belonging to a
1645    single region.  */
1646 inline bool
1647 ira_single_region_allocno_p (ira_allocno_t a, ira_allocno_t subloop_a)
1648 {
1649   if (flag_ira_region != IRA_REGION_MIXED)
1650     return false;
1651 
1652   if (ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (subloop_a))
1653     return false;
1654 
1655   auto rclass = ALLOCNO_CLASS (a);
1656   auto pclass = ira_pressure_class_translate[rclass];
1657   auto loop_used_regs = ALLOCNO_LOOP_TREE_NODE (a)->reg_pressure[pclass];
1658   return loop_used_regs <= ira_class_hard_regs_num[pclass];
1659 }
1660 
1661 /* Return the set of all hard registers that conflict with A.  */
1662 inline HARD_REG_SET
1663 ira_total_conflict_hard_regs (ira_allocno_t a)
1664 {
1665   auto obj_0 = ALLOCNO_OBJECT (a, 0);
1666   HARD_REG_SET conflicts = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj_0);
1667   for (int i = 1; i < ALLOCNO_NUM_OBJECTS (a); i++)
1668     conflicts |= OBJECT_TOTAL_CONFLICT_HARD_REGS (ALLOCNO_OBJECT (a, i));
1669   return conflicts;
1670 }
1671 
1672 /* Return the cost of saving a caller-saved register before each call
1673    in A's live range and restoring the same register after each call.  */
1674 inline int
1675 ira_caller_save_cost (ira_allocno_t a)
1676 {
1677   auto mode = ALLOCNO_MODE (a);
1678   auto rclass = ALLOCNO_CLASS (a);
1679   return (ALLOCNO_CALL_FREQ (a)
1680 	  * (ira_memory_move_cost[mode][rclass][0]
1681 	     + ira_memory_move_cost[mode][rclass][1]));
1682 }
1683 
1684 /* A and SUBLOOP_A are allocnos for the same pseudo register, with A's
1685    loop immediately enclosing SUBLOOP_A's loop.  If we allocate to A a
1686    hard register R that is clobbered by a call in SUBLOOP_A, decide
1687    which of the following approaches should be used for handling the
1688    conflict:
1689 
1690    (1) Spill R on entry to SUBLOOP_A's loop, assign memory to SUBLOOP_A,
1691        and restore R on exit from SUBLOOP_A's loop.
1692 
1693    (2) Spill R before each necessary call in SUBLOOP_A's live range and
1694        restore R after each such call.
1695 
1696    Return true if (1) is better than (2).  SPILL_COST is the cost of
1697    doing (1).  */
1698 inline bool
1699 ira_caller_save_loop_spill_p (ira_allocno_t a, ira_allocno_t subloop_a,
1700 			      int spill_cost)
1701 {
1702   if (!ira_subloop_allocnos_can_differ_p (a))
1703     return false;
1704 
1705   /* Calculate the cost of saving a call-clobbered register
1706      before each call and restoring it afterwards.  */
1707   int call_cost = ira_caller_save_cost (subloop_a);
1708   return call_cost && call_cost >= spill_cost;
1709 }
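/* Worked example with made-up numbers, continuing the border-cost
   example above: let SPILL_COST be the 700 units needed to spill R
   around the whole subloop.  If SUBLOOP_A crosses calls with an
   ALLOCNO_CALL_FREQ of 30 and memory store/load costs of 4 and 6,
   ira_caller_save_cost returns 30 * (4 + 6) = 300, which is less than
   700, so the function returns false and R is saved around each call
   instead.  With a call frequency of 80 the per-call cost becomes 800
   >= 700 and spilling for the whole loop wins.  */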
1710 
1711 #endif /* GCC_IRA_INT_H */
1712