xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/ira-build.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Building internal representation for IRA.
2    Copyright (C) 2006-2015 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "hard-reg-set.h"
31 #include "predict.h"
32 #include "vec.h"
33 #include "hashtab.h"
34 #include "hash-set.h"
35 #include "machmode.h"
36 #include "input.h"
37 #include "function.h"
38 #include "dominance.h"
39 #include "cfg.h"
40 #include "basic-block.h"
41 #include "insn-config.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "params.h"
45 #include "df.h"
46 #include "reload.h"
47 #include "sparseset.h"
48 #include "ira-int.h"
49 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
50 
/* Forward declaration: find an existing copy connecting two allocnos
   (defined later in this file).  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
				     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
112 
113 /* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
114    the member loop_num.  */
115 static void
116 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
117 {
118   int max_regno = max_reg_num ();
119 
120   node->regno_allocno_map
121     = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
122   memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
123   memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
124   node->all_allocnos = ira_allocate_bitmap ();
125   node->modified_regnos = ira_allocate_bitmap ();
126   node->border_allocnos = ira_allocate_bitmap ();
127   node->local_copies = ira_allocate_bitmap ();
128   node->loop_num = loop_num;
129   node->children = NULL;
130   node->subloops = NULL;
131 }
132 
133 
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds the all function) will be not allocated
   but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  /* One node slot per basic block.  Only the pointer members are
     cleared here; the rest is filled in lazily by form_loop_tree.  */
  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
		     * last_basic_block_for_fn (cfun)));
  /* Remember the BB count now -- live range splitting may add blocks
     later, and finish_loop_tree_nodes must free exactly these.  */
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: a single node represents the whole
	 function.  */
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  /* Skip loops whose entry or exit crosses an abnormal
	     critical edge: we could not emit fix-up insns on such
	     edges, so the loop cannot form its own region.  A NULL
	     regno_allocno_map marks the node as unused.  */
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  edges.release ();
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
204 
/* The function returns TRUE if there is more than one allocation
   region, i.e. some loop node other than the root still has a live
   regno_allocno_map.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
	  && ira_loop_tree_root != &ira_loop_nodes[i])
	return true;
  return false;
}
220 
221 /* Free the loop tree node of a loop.  */
222 static void
223 finish_loop_tree_node (ira_loop_tree_node_t loop)
224 {
225   if (loop->regno_allocno_map != NULL)
226     {
227       ira_assert (loop->bb == NULL);
228       ira_free_bitmap (loop->local_copies);
229       ira_free_bitmap (loop->border_allocnos);
230       ira_free_bitmap (loop->modified_regnos);
231       ira_free_bitmap (loop->all_allocnos);
232       ira_free (loop->regno_allocno_map);
233       loop->regno_allocno_map = NULL;
234     }
235 }
236 
/* Free the loop tree nodes and the basic block nodes.  The BB node
   members may individually be NULL (they are only set for blocks
   visited by form_loop_tree), hence the per-member checks.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  /* Use the BB count recorded at allocation time; the CFG may have
     grown since.  */
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
	ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
	ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
261 
262 
263 
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we adding
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  /* Make sure all ancestors are in the tree before LOOP itself.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  /* Loop number 0 designates the whole-function node.  */
  loop_num = loop != NULL ? loop->num : 0;
  /* regno_allocno_map != NULL means the loop forms a region;
     children == NULL means it has not been linked into the tree
     yet (the tree is built before any children are attached).  */
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  /* Find the nearest enclosing loop that actually forms a
	     region (loops skipped in create_loop_tree_nodes have a
	     NULL map and are transparent here).  */
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  /* Push onto the parent's children and subloops lists.  */
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
315 
316 /* The following recursive function sets up levels of nodes of the
317    tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
318    The function returns maximal value of level in the tree + 1.  */
319 static int
320 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
321 {
322   int height, max_height;
323   ira_loop_tree_node_t subloop_node;
324 
325   ira_assert (loop_node->bb == NULL);
326   loop_node->level = level;
327   max_height = level + 1;
328   for (subloop_node = loop_node->subloops;
329        subloop_node != NULL;
330        subloop_node = subloop_node->subloop_next)
331     {
332       ira_assert (subloop_node->bb == NULL);
333       height = setup_loop_tree_level (subloop_node, level + 1);
334       if (height > max_height)
335 	max_height = height;
336     }
337   return max_height;
338 }
339 
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* Initialize the node for this block.  */
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  /* Nearest enclosing loop that forms a region (NULL map
	     means the loop was skipped as a region).  */
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      /* Ensure the parent loop (and its ancestors) are in the tree,
	 then prepend this block to the parent's children list.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
382 
383 
384 
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes, e.g. after max_reg_num () has grown.  All maps are
   reallocated for the new register count and repopulated from the
   existing allocnos.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Reallocate the per-loop maps for the new max_regno.  */
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  /* Re-thread every allocno onto its regno chain and its loop node
     map.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
428 
429 
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array ira_allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
440 
/* Initialize data concerning allocnos: the allocation pools, the
   container vectors and the regno -> allocno map.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
			 sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  /* Reserve twice the pseudo count as an initial size guess.  */
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
463 
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index within A for multi-object allocnos
   (0 for the single-object case).  The new object gets the next
   conflict id and is registered in ira_object_id_map.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  /* Start conflicting with all unallocatable hard regs and with
     everything outside the allocno's class.  */
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  /* Empty live-range window: MIN > MAX.  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  /* The vector may have been reallocated by the push; refresh the
     exported pointer and count.  */
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
494 
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Link onto the per-regno chain and into the loop node map.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  /* Initialize all remaining members to neutral default values.  */
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  /* Register in the global container; the push may reallocate, so
     refresh the exported pointer and count.  */
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
558 
/* Set up register class for A and update its conflict hard
   registers: every object of A additionally conflicts with all hard
   regs outside ACLASS.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
    }
}
576 
/* Determine the number of objects we should associate with allocno A
   and allocate them.  An allocno gets two objects only when it is a
   two-word value occupying exactly two registers of its class;
   otherwise it gets a single object.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
594 
595 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
596    ALLOCNO_OBJECT structures.  This must be called after the allocno
597    classes are known.  */
598 static void
599 create_allocno_objects (void)
600 {
601   ira_allocno_t a;
602   ira_allocno_iterator ai;
603 
604   FOR_EACH_ALLOCNO (a, ai)
605     ira_create_allocno_objects (a);
606 }
607 
/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.
   FROM and TO must have the same number of objects.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
			  OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
			OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  /* Propagate the stack-register prohibitions as well.  */
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
635 
/* Update hard register conflict information for all objects associated
   with A to include the regs in SET (both the local and the total
   conflict sets).  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
650 
651 /* Return TRUE if a conflict vector with NUM elements is more
652    profitable than a conflict bit vector for OBJ.  */
653 bool
654 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
655 {
656   int nw;
657   int max = OBJECT_MAX (obj);
658   int min = OBJECT_MIN (obj);
659 
660   if (max < min)
661     /* We prefer a bit vector in such case because it does not result
662        in allocation.  */
663     return false;
664 
665   nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
666   return (2 * sizeof (ira_object_t) * (num + 1)
667 	  < 3 * nw * sizeof (IRA_INT_TYPE));
668 }
669 
/* Allocates and initialize the conflict vector of OBJ for NUM
   conflicting objects.  The vector is NULL-terminated and starts
   empty.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
688 
/* Allocate and initialize the conflict bit vector of OBJ, sized to
   cover the conflict ids in [OBJECT_MIN, OBJECT_MAX].  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
703 
704 /* Allocate and initialize the conflict vector or conflict bit vector
705    of OBJ for NUM conflicting allocnos whatever is more profitable.  */
706 void
707 ira_allocate_object_conflicts (ira_object_t obj, int num)
708 {
709   if (ira_conflict_vector_profitable_p (obj, num))
710     ira_allocate_conflict_vec (obj, num);
711   else
712     allocate_conflict_bit_vec (obj);
713 }
714 
715 /* Add OBJ2 to the conflicts of OBJ1.  */
716 static void
717 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
718 {
719   int num;
720   unsigned int size;
721 
722   if (OBJECT_CONFLICT_VEC_P (obj1))
723     {
724       ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
725       int curr_num = OBJECT_NUM_CONFLICTS (obj1);
726       num = curr_num + 2;
727       if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
728 	{
729 	  ira_object_t *newvec;
730 	  size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
731 	  newvec = (ira_object_t *) ira_allocate (size);
732 	  memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
733 	  ira_free (vec);
734 	  vec = newvec;
735 	  OBJECT_CONFLICT_ARRAY (obj1) = vec;
736 	  OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
737 	}
738       vec[num - 2] = obj2;
739       vec[num - 1] = NULL;
740       OBJECT_NUM_CONFLICTS (obj1)++;
741     }
742   else
743     {
744       int nw, added_head_nw, id;
745       IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
746 
747       id = OBJECT_CONFLICT_ID (obj2);
748       if (OBJECT_MIN (obj1) > id)
749 	{
750 	  /* Expand head of the bit vector.  */
751 	  added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
752 	  nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
753 	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
754 	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
755 	    {
756 	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
757 		       vec, nw * sizeof (IRA_INT_TYPE));
758 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
759 	    }
760 	  else
761 	    {
762 	      size
763 		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
764 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
765 	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
766 		      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
767 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
768 	      memset ((char *) vec
769 		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
770 		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
771 	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
772 	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
773 	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
774 	    }
775 	  OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
776 	}
777       else if (OBJECT_MAX (obj1) < id)
778 	{
779 	  nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
780 	  size = nw * sizeof (IRA_INT_TYPE);
781 	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
782 	    {
783 	      /* Expand tail of the bit vector.  */
784 	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
785 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
786 	      memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
787 	      memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
788 		      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
789 	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
790 	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
791 	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
792 	    }
793 	  OBJECT_MAX (obj1) = id;
794 	}
795       SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
796     }
797 }
798 
/* Add OBJ1 to the conflicts of OBJ2 and vice versa, keeping the
   conflict relation symmetric.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
806 
/* Clear all conflicts of OBJ without freeing the underlying
   storage.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      /* Empty pointer vector: count zero and NULL terminator.  */
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      /* Zero only the words covering [OBJECT_MIN, OBJECT_MAX].  */
      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
824 
/* The array used to find duplications in conflict vectors of
   allocnos, indexed by conflict id.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno; bumping it invalidates all previous marks
   without clearing the array.  */
static int curr_conflict_check_tick;
832 
833 /* Remove duplications in conflict vector of OBJ.  */
834 static void
835 compress_conflict_vec (ira_object_t obj)
836 {
837   ira_object_t *vec, conflict_obj;
838   int i, j;
839 
840   ira_assert (OBJECT_CONFLICT_VEC_P (obj));
841   vec = OBJECT_CONFLICT_VEC (obj);
842   curr_conflict_check_tick++;
843   for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
844     {
845       int id = OBJECT_CONFLICT_ID (conflict_obj);
846       if (conflict_check[id] != curr_conflict_check_tick)
847 	{
848 	  conflict_check[id] = curr_conflict_check_tick;
849 	  vec[j++] = conflict_obj;
850 	}
851     }
852   OBJECT_NUM_CONFLICTS (obj) = j;
853   vec[j] = NULL;
854 }
855 
/* Remove duplications in conflict vectors of all allocnos.  Objects
   using the bit-vector representation cannot contain duplicates and
   are skipped.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  /* Scratch array shared by all compress_conflict_vec calls.  */
  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    {
      if (OBJECT_CONFLICT_VEC_P (obj))
	compress_conflict_vec (obj);
    }
  ira_free (conflict_check);
}
873 
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  The format is
   "a<num>(r<regno>,{b<bb>|l<loop>}[:<member>])".  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      /* A cap: recursively print the allocno it represents.  */
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
893 
/* Create and return the cap representing allocno A in the
   parent loop.  The cap inherits A's mode, class, costs and conflict
   information; it is not entered into the regno allocno maps.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  /* CAP_P is true: caps stay out of the per-regno chains.  */
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  /* Link cap and member both ways.  */
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
938 
939 /* Create and return a live range for OBJECT with given attributes.  */
940 live_range_t
941 ira_create_live_range (ira_object_t obj, int start, int finish,
942 		       live_range_t next)
943 {
944   live_range_t p;
945 
946   p = (live_range_t) pool_alloc (live_range_pool);
947   p->object = obj;
948   p->start = start;
949   p->finish = finish;
950   p->next = next;
951   return p;
952 }
953 
954 /* Create a new live range for OBJECT and queue it at the head of its
955    live range list.  */
956 void
957 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
958 {
959   live_range_t p;
960   p = ira_create_live_range (object, start, finish,
961 			     OBJECT_LIVE_RANGES (object));
962   OBJECT_LIVE_RANGES (object) = p;
963 }
964 
965 /* Copy allocno live range R and return the result.  */
966 static live_range_t
967 copy_live_range (live_range_t r)
968 {
969   live_range_t p;
970 
971   p = (live_range_t) pool_alloc (live_range_pool);
972   *p = *r;
973   return p;
974 }
975 
976 /* Copy allocno live range list given by its head R and return the
977    result.  */
978 live_range_t
979 ira_copy_live_range_list (live_range_t r)
980 {
981   live_range_t p, first, last;
982 
983   if (r == NULL)
984     return NULL;
985   for (first = last = NULL; r != NULL; r = r->next)
986     {
987       p = copy_live_range (r);
988       if (first == NULL)
989 	first = p;
990       else
991 	last->next = p;
992       last = p;
993     }
994   return first;
995 }
996 
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  The merge is destructive: nodes of R1 and R2 are
   reused and overlapping nodes are freed.  NOTE(review): the lists
   appear to be kept in decreasing order of START (new ranges are
   prepended as program points grow) -- confirm against the live-range
   building code.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant that R1 is the range with the larger (or
	 equal) start.  */
      if (r1->start < r2->start)
	{
	  temp = r1;
	  r1 = r2;
	  r2 = temp;
	}
      /* "+ 1" also coalesces adjacent ranges, not only overlapping
	 ones.  */
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  /* Append whichever tail is left over; it is already detached from
     any following nodes (next == NULL is asserted below).  */
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1074 
1075 /* Return TRUE if live ranges R1 and R2 intersect.  */
1076 bool
1077 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1078 {
1079   /* Remember the live ranges are always kept ordered.  */
1080   while (r1 != NULL && r2 != NULL)
1081     {
1082       if (r1->start > r2->finish)
1083 	r1 = r1->next;
1084       else if (r2->start > r1->finish)
1085 	r2 = r2->next;
1086       else
1087 	return true;
1088     }
1089   return false;
1090 }
1091 
/* Free allocno live range R back to the live range pool.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1098 
1099 /* Free list of allocno live ranges starting with R.  */
1100 void
1101 ira_finish_live_range_list (live_range_t r)
1102 {
1103   live_range_t next_r;
1104 
1105   for (; r != NULL; r = next_r)
1106     {
1107       next_r = r->next;
1108       ira_finish_live_range (r);
1109     }
1110 }
1111 
1112 /* Free updated register costs of allocno A.  */
1113 void
1114 ira_free_allocno_updated_costs (ira_allocno_t a)
1115 {
1116   enum reg_class aclass;
1117 
1118   aclass = ALLOCNO_CLASS (a);
1119   if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1120     ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1121   ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1122   if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1123     ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1124 			  aclass);
1125   ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1126 }
1127 
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  Also releases A's objects (their live ranges and conflict
   arrays) and removes A and its objects from the global maps.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      /* Live ranges and the conflict array must go before the object
	 itself is returned to the pool.  */
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  /* Null the fields so a stale pointer cannot be freed twice.  */
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
1161 
/* Free the memory allocated for allocno A (its cost vectors, objects
   and live ranges via ira_free_allocno_costs, then A itself).  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
1169 
/* Free the memory allocated for all allocnos.  Individual allocnos
   must be freed before their backing pools are destroyed.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
1186 
1187 
1188 
/* Allocation pool for allocno preference records.  */
static alloc_pool pref_pool;

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs (which aliases its storage).  */
static vec<ira_pref_t> pref_vec;
1195 
/* The function initializes data concerning allocno prefs: the pref
   pool, the pref vector, and the exported ira_prefs array/count.  */
static void
initiate_prefs (void)
{
  pref_pool
    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
  /* get_max_uid gives a rough upper bound for the initial reservation.  */
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}
1206 
1207 /* Return pref for A and HARD_REGNO if any.  */
1208 static ira_pref_t
1209 find_allocno_pref (ira_allocno_t a, int hard_regno)
1210 {
1211   ira_pref_t pref;
1212 
1213   for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1214     if (pref->allocno == a && pref->hard_regno == hard_regno)
1215       return pref;
1216   return NULL;
1217 }
1218 
/* Create and return pref with given attributes A, HARD_REGNO, and
   FREQ.  The new pref is registered in the global pref vector; the
   exported ira_prefs array and ira_prefs_num are refreshed because
   safe_push may reallocate the vector's storage.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = (ira_pref_t) pool_alloc (pref_pool);
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}
1235 
1236 /* Attach a pref PREF to the corresponding allocno.  */
1237 static void
1238 add_allocno_pref_to_list (ira_pref_t pref)
1239 {
1240   ira_allocno_t a = pref->allocno;
1241 
1242   pref->next_pref = ALLOCNO_PREFS (a);
1243   ALLOCNO_PREFS (a) = pref;
1244 }
1245 
1246 /* Create (or update frequency if the pref already exists) the pref of
1247    allocnos A preferring HARD_REGNO with frequency FREQ.  */
1248 void
1249 ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1250 {
1251   ira_pref_t pref;
1252 
1253   if (freq <= 0)
1254     return;
1255   if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1256     {
1257       pref->freq += freq;
1258       return;
1259     }
1260   pref = ira_create_pref (a, hard_regno, freq);
1261   ira_assert (a != NULL);
1262   add_allocno_pref_to_list (pref);
1263 }
1264 
/* Print info about PREF into file F in the form
   "pref<num>:a<num>(r<regno>)<-hr<hard-regno>@<freq>".  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, "  pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
	   ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
	   pref->hard_regno, pref->freq);
}

/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}

/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}

/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}

/* Print info about prefs involving allocno A into file F, one line
   for the allocno followed by its whole pref chain.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}

/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
1317 
/* The function frees memory allocated for PREF and clears its slot in
   the global ira_prefs array (the slot is not reused).  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pool_free (pref_pool, pref);
}
1325 
1326 /* Remove PREF from the list of allocno prefs and free memory for
1327    it.  */
1328 void
1329 ira_remove_pref (ira_pref_t pref)
1330 {
1331   ira_pref_t cpref, prev;
1332 
1333   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1334     fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
1335 	     pref->num, pref->hard_regno, pref->freq);
1336   for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
1337        cpref != NULL;
1338        prev = cpref, cpref = cpref->next_pref)
1339     if (cpref == pref)
1340       break;
1341   ira_assert (cpref != NULL);
1342   if (prev == NULL)
1343     ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
1344   else
1345     prev->next_pref = pref->next_pref;
1346   finish_pref (pref);
1347 }
1348 
1349 /* Remove all prefs of allocno A.  */
1350 void
1351 ira_remove_allocno_prefs (ira_allocno_t a)
1352 {
1353   ira_pref_t pref, next_pref;
1354 
1355   for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1356     {
1357       next_pref = pref->next_pref;
1358       finish_pref (pref);
1359     }
1360   ALLOCNO_PREFS (a) = NULL;
1361 }
1362 
/* Free memory allocated for all prefs, then release the pref vector
   and its backing pool.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  free_alloc_pool (pref_pool);
}
1375 
1376 
1377 
/* Allocation pool for allocno copy records.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies (which aliases its storage).  */
static vec<ira_copy_t> copy_vec;
1384 
/* The function initializes data concerning allocno copies: the copy
   pool, the copy vector, and the exported ira_copies array/count.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  /* get_max_uid gives a rough upper bound for the initial reservation.  */
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
1395 
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  A1's copy list threads through either the
   first- or second-allocno links depending on which end A1 occupies
   in each copy.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      /* Pick the successor link matching A1's position in CP.  */
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
1425 
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  The new copy is registered
   in the global copy vector; ira_copies/ira_copies_num are refreshed
   because safe_push may reallocate the vector's storage.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		 bool constraint_p, rtx_insn *insn,
		 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}
1448 
/* Attach a copy CP to allocnos involved into the copy.  CP is pushed
   onto the head of both allocnos' copy lists; the old heads' back
   links must be fixed through whichever of their prev links (first or
   second) corresponds to the shared allocno.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      /* The old head links through its "first" thread only when FIRST
	 is its first end.  */
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1476 
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  The prev/next list
   links must be swapped together with the ends so each allocno keeps
   threading through its own links.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  ira_allocno_t temp;
  ira_copy_t temp_cp;

  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  temp = cp->first;
  cp->first = cp->second;
  cp->second = temp;

  temp_cp = cp->prev_first_allocno_copy;
  cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
  cp->prev_second_allocno_copy = temp_cp;

  temp_cp = cp->next_first_allocno_copy;
  cp->next_first_allocno_copy = cp->next_second_allocno_copy;
  cp->next_second_allocno_copy = temp_cp;
}
1500 
1501 /* Create (or update frequency if the copy already exists) and return
1502    the copy of allocnos FIRST and SECOND with frequency FREQ
1503    corresponding to move insn INSN (if any) and originated from
1504    LOOP_TREE_NODE.  */
1505 ira_copy_t
1506 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1507 		      bool constraint_p, rtx_insn *insn,
1508 		      ira_loop_tree_node_t loop_tree_node)
1509 {
1510   ira_copy_t cp;
1511 
1512   if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1513     {
1514       cp->freq += freq;
1515       return cp;
1516     }
1517   cp = ira_create_copy (first, second, freq, constraint_p, insn,
1518 			loop_tree_node);
1519   ira_assert (first != NULL && second != NULL);
1520   add_allocno_copy_to_list (cp);
1521   swap_allocno_copy_ends_if_necessary (cp);
1522   return cp;
1523 }
1524 
/* Print info about copy CP into file F, tagging it as a "move"
   (it has an insn), a "constraint" copy, or a "shuffle" copy.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}

/* Debugger entry point: dump copy REF to stderr.  */
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

/* Debugger entry point: dump *PTR to stderr, or "<nil>" for NULL.  */
DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}

/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}

/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
1575 
/* Print info about copies involving allocno A into file F.  A's copy
   list threads through either the first- or second-allocno links
   depending on which end A occupies in each copy.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      /* Pick the successor link matching A's position in CP.  */
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
1603 
/* Debugger entry point: dump the copies of allocno REF to stderr.  */
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

/* Debugger entry point: dump *PTR's copies, or "<nil>" for NULL.  */
DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}

/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
1633 
1634 
/* Free memory allocated for all copies, then release the copy vector
   and its backing pool.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  free_alloc_pool (copy_pool);
}
1648 
1649 
/* Pools for cost vectors.  It is defined only for allocno classes;
   entries for other classes stay uninitialized.  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
1652 
1653 /* The function initiates work with hard register cost vectors.  It
1654    creates allocation pool for each allocno class.  */
1655 static void
1656 initiate_cost_vectors (void)
1657 {
1658   int i;
1659   enum reg_class aclass;
1660 
1661   for (i = 0; i < ira_allocno_classes_num; i++)
1662     {
1663       aclass = ira_allocno_classes[i];
1664       cost_vector_pool[aclass]
1665 	= create_alloc_pool ("cost vectors",
1666 			     sizeof (int) * ira_class_hard_regs_num[aclass],
1667 			     100);
1668     }
1669 }
1670 
/* Allocate and return a cost vector for ACLASS from the class's
   pool (one int per hard register of the class; not zeroed).  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
1677 
/* Free a cost vector VEC back to ACLASS's pool.  VEC must be
   non-NULL and must have been allocated for the same class.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
1685 
1686 /* Finish work with hard register cost vectors.  Release allocation
1687    pool for each allocno class.  */
1688 static void
1689 finish_cost_vectors (void)
1690 {
1691   int i;
1692   enum reg_class aclass;
1693 
1694   for (i = 0; i < ira_allocno_classes_num; i++)
1695     {
1696       aclass = ira_allocno_classes[i];
1697       free_alloc_pool (cost_vector_pool[aclass]);
1698     }
1699 }
1700 
1701 
1702 
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbours in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
	 the flag to mark blocks we still have to visit to add them to
	 our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      /* Mark every body block as pending.  */
      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      /* Iterative DFS over predecessor edges, started from each
	 not-yet-visited block, taken in reverse pre-order.  */
      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      /* No pending predecessor was pushed: N is finished, so
		 emit it in the post-order.  */
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
1788 
/* The current loop tree node and its regno allocno map, maintained by
   ira_traverse_loop_tree for the callbacks it invokes.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
1792 
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of  the loop tree.  */

void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
	 is set up such that nodes in the loop body appear in a pre-order
	 of their place in the CFG.  */
      for (subloop_node = loop_node->children;
	   subloop_node != NULL;
	   subloop_node = subloop_node->next)
	if (subloop_node->bb != NULL)
	  loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	  (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
	{
	  vec<ira_loop_tree_node_t> loop_rev_postorder =
	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
	    (*postorder_func) (subloop_node);
	  loop_rev_postorder.release ();
	}
    }

  /* Recurse into subloops; each call overwrites the two globals.  */
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* Restore the globals clobbered by the recursive calls before the
     postorder callback for this node.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1865 
1866 
1867 
/* The basic block currently being processed by create_insn_allocnos.  */
static basic_block curr_bb;
1870 
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  OUTER is the expression immediately containing X, or
   NULL; it is used to widen ALLOCNO_WMODE for SUBREG uses.  */
static void
create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    {
	      a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
	      /* Remember the widest mode the pseudo is accessed in
		 through a SUBREG.  */
	      if (outer != NULL && GET_CODE (outer) == SUBREG)
		{
		  machine_mode wmode = GET_MODE (outer);
		  if (GET_MODE_SIZE (wmode) > GET_MODE_SIZE (ALLOCNO_WMODE (a)))
		    ALLOCNO_WMODE (a) = wmode;
		}
	    }

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), NULL, true);
      create_insn_allocnos (SET_SRC (x), NULL, false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      return;
    }
  else if (code == MEM)
    {
      /* An address is always a use, even inside a store.  */
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }

  /* Generic walk over the sub-rtxes of any other code.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), x, output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
    }
}
1941 
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx_insn *insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), NULL, false);
  /* It might be an allocno living through from one subloop to
     another: create allocnos for pseudos live on entry but not
     referenced by any insn above.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
1964 
1965 /* Create allocnos corresponding to pseudo-registers living on edge E
1966    (a loop entry or exit).  Also mark the allocnos as living on the
1967    loop border. */
1968 static void
1969 create_loop_allocnos (edge e)
1970 {
1971   unsigned int i;
1972   bitmap live_in_regs, border_allocnos;
1973   bitmap_iterator bi;
1974   ira_loop_tree_node_t parent;
1975 
1976   live_in_regs = df_get_live_in (e->dest);
1977   border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1978   EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
1979 			     FIRST_PSEUDO_REGISTER, i, bi)
1980     if (bitmap_bit_p (live_in_regs, i))
1981       {
1982 	if (ira_curr_regno_allocno_map[i] == NULL)
1983 	  {
1984 	    /* The order of creations is important for right
1985 	       ira_regno_allocno_map.  */
1986 	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
1987 		&& parent->regno_allocno_map[i] == NULL)
1988 	      ira_create_allocno (i, false, parent);
1989 	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
1990 	  }
1991 	bitmap_set_bit (border_allocnos,
1992 			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1993       }
1994 }
1995 
1996 /* Create allocnos corresponding to pseudo-registers living in loop
1997    represented by the corresponding loop tree node LOOP_NODE.  This
1998    function is called by ira_traverse_loop_tree.  */
1999 static void
2000 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
2001 {
2002   if (loop_node->bb != NULL)
2003     create_bb_allocnos (loop_node);
2004   else if (loop_node != ira_loop_tree_root)
2005     {
2006       int i;
2007       edge_iterator ei;
2008       edge e;
2009       vec<edge> edges;
2010 
2011       ira_assert (current_loops != NULL);
2012       FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2013 	if (e->src != loop_node->loop->latch)
2014 	  create_loop_allocnos (e);
2015 
2016       edges = get_loop_exit_edges (loop_node->loop);
2017       FOR_EACH_VEC_ELT (edges, i, e)
2018 	create_loop_allocnos (e);
2019       edges.release ();
2020     }
2021 }
2022 
2023 /* Propagate information about allocnos modified inside the loop given
2024    by its LOOP_TREE_NODE to its parent.  */
2025 static void
2026 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
2027 {
2028   if (loop_tree_node == ira_loop_tree_root)
2029     return;
2030   ira_assert (loop_tree_node->bb == NULL);
2031   bitmap_ior_into (loop_tree_node->parent->modified_regnos,
2032 		   loop_tree_node->modified_regnos);
2033 }
2034 
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* Propagation only makes sense for regional allocation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  /* For each regno walk its allocno list; propagate each allocno's
     accumulated data into the allocno of its parent region, when one
     exists and this allocno crosses the region border.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* If spilling the inner allocno may be useful, so may
	     spilling the outer one.  */
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  /* Accumulate reference, frequency and call-crossing
	     counters.  */
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
 	  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
 			    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  /* Accumulate hard-register and memory cost vectors.  */
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_CLASS_COST (parent_a)
	    += ALLOCNO_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
2091 
2092 /* Create allocnos corresponding to pseudo-registers in the current
2093    function.  Traverse the loop tree for this.  */
2094 static void
2095 create_allocnos (void)
2096 {
2097   /* We need to process BB first to correctly link allocnos by member
2098      next_regno_allocno.  */
2099   ira_traverse_loop_tree (true, ira_loop_tree_root,
2100 			  create_loop_tree_node_allocnos, NULL);
2101   if (optimize)
2102     ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
2103 			    propagate_modified_regnos);
2104 }
2105 
2106 
2107 
2108 /* The page contains function to remove some regions from a separate
2109    register allocation.  We remove regions whose separate allocation
2110    will hardly improve the result.  As a result we speed up regional
2111    register allocation.  */
2112 
2113 /* The function changes the object in range list given by R to OBJ.  */
2114 static void
2115 change_object_in_range_list (live_range_t r, ira_object_t obj)
2116 {
2117   for (; r != NULL; r = r->next)
2118     r->object = obj;
2119 }
2120 
2121 /* Move all live ranges associated with allocno FROM to allocno TO.  */
2122 static void
2123 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2124 {
2125   int i;
2126   int n = ALLOCNO_NUM_OBJECTS (from);
2127 
2128   gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2129 
2130   for (i = 0; i < n; i++)
2131     {
2132       ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2133       ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2134       live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2135 
2136       if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2137 	{
2138 	  fprintf (ira_dump_file,
2139 		   "      Moving ranges of a%dr%d to a%dr%d: ",
2140 		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2141 		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2142 	  ira_print_live_range_list (ira_dump_file, lr);
2143 	}
2144       change_object_in_range_list (lr, to_obj);
2145       OBJECT_LIVE_RANGES (to_obj)
2146 	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2147       OBJECT_LIVE_RANGES (from_obj) = NULL;
2148     }
2149 }
2150 
2151 static void
2152 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2153 {
2154   int i;
2155   int n = ALLOCNO_NUM_OBJECTS (from);
2156 
2157   gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2158 
2159   for (i = 0; i < n; i++)
2160     {
2161       ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2162       ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2163       live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2164 
2165       if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2166 	{
2167 	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
2168 		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2169 		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2170 	  ira_print_live_range_list (ira_dump_file, lr);
2171 	}
2172       lr = ira_copy_live_range_list (lr);
2173       change_object_in_range_list (lr, to_obj);
2174       OBJECT_LIVE_RANGES (to_obj)
2175 	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2176     }
2177 }
2178 
2179 /* Return TRUE if NODE represents a loop with low register
2180    pressure.  */
2181 static bool
2182 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2183 {
2184   int i;
2185   enum reg_class pclass;
2186 
2187   if (node->bb != NULL)
2188     return false;
2189 
2190   for (i = 0; i < ira_pressure_classes_num; i++)
2191     {
2192       pclass = ira_pressure_classes[i];
2193       if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2194 	  && ira_class_hard_regs_num[pclass] > 1)
2195 	return false;
2196     }
2197   return true;
2198 }
2199 
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> exits;
  bool found = false;

  /* Any EH edge into the header makes the loop complex.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if ((e->flags & EDGE_EH) != 0)
      return true;
  /* Otherwise look for a complex exit edge.  */
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, e)
    if ((e->flags & EDGE_COMPLEX) != 0)
      {
	found = true;
	break;
      }
  exits.release ();
  return found;
}
#endif
2228 
2229 /* Sort loops for marking them for removal.  We put already marked
2230    loops first, then less frequent loops next, and then outer loops
2231    next.  */
2232 static int
2233 loop_compare_func (const void *v1p, const void *v2p)
2234 {
2235   int diff;
2236   ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2237   ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2238 
2239   ira_assert (l1->parent != NULL && l2->parent != NULL);
2240   if (l1->to_remove_p && ! l2->to_remove_p)
2241     return -1;
2242   if (! l1->to_remove_p && l2->to_remove_p)
2243     return 1;
2244   if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
2245     return diff;
2246   if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2247     return diff;
2248   /* Make sorting stable.  */
2249   return l1->loop_num - l2->loop_num;
2250 }
2251 
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
   exit or enter edges are removed too because the allocation might
   require put pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  /* Collect all live loop tree nodes (except the root) into a
     temporary array for sorting below.  */
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	/* Mark a loop when both it and its parent have low register
	   pressure, or (on stack-register targets) when it has a
	   complex entry or exit edge.  */
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  /* Sort marked / cheap / outer loops to the front, then mark enough
     additional loops so that at most IRA_MAX_LOOPS_NUM survive.  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
2310 
2311 /* Mark all loops but root for removing.  */
2312 static void
2313 mark_all_loops_for_removal (void)
2314 {
2315   int i;
2316   loop_p loop;
2317 
2318   ira_assert (current_loops != NULL);
2319   FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2320     if (ira_loop_nodes[i].regno_allocno_map != NULL)
2321       {
2322 	if (ira_loop_nodes[i].parent == NULL)
2323 	  {
2324 	    /* Don't remove the root.  */
2325 	    ira_loop_nodes[i].to_remove_p = false;
2326 	    continue;
2327 	  }
2328 	ira_loop_nodes[i].to_remove_p = true;
2329 	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2330 	  fprintf
2331 	    (ira_dump_file,
2332 	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2333 	     ira_loop_nodes[i].loop_num,
2334 	     ira_loop_nodes[i].loop->header->index,
2335 	     ira_loop_nodes[i].loop->header->frequency,
2336 	     loop_depth (ira_loop_nodes[i].loop));
2337       }
2338 }
2339 
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes; filled
   by remove_uneccesary_loop_nodes_from_loop_tree and drained (each
   node finalized) in remove_unnecessary_regions.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes; used
   as a temporary stack while relinking the tree in
   remove_uneccesary_loop_nodes_from_loop_tree.  */
static vec<ira_loop_tree_node_t> children_vec;
2347 
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  /* Remember where this node's surviving children begin on the
     children_vec stack.  */
  start = children_vec.length ();
  /* Recurse into subloops; basic-block children are kept as-is.
     Children of a removed subloop remain on children_vec and thus get
     adopted by the nearest surviving ancestor.  */
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* This node disappears: leave its children on the stack for the
	 caller and queue the node itself for finalization.  */
      removed_loop_vec.safe_push (node);
      return;
    }
  /* Pop the children collected above and relink them (and the subloop
     chain) under this surviving node.  */
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
2385 
2386 /* Return TRUE if NODE is inside PARENT.  */
2387 static bool
2388 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2389 {
2390   for (node = node->parent; node != NULL; node = node->parent)
2391     if (node == parent)
2392       return true;
2393   return false;
2394 }
2395 
2396 /* Sort allocnos according to their order in regno allocno list.  */
2397 static int
2398 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2399 {
2400   ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2401   ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2402   ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2403   ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2404 
2405   if (loop_is_inside_p (n1, n2))
2406     return -1;
2407   else if (loop_is_inside_p (n2, n1))
2408     return 1;
2409   /* If allocnos are equally good, sort by allocno numbers, so that
2410      the results of qsort leave nothing to chance.  We put allocnos
2411      with higher number first in the list because it is the original
2412      order for allocnos from loops on the same levels.  */
2413   return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2414 }
2415 
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  Allocated lazily (and later freed) in
   remove_unnecessary_allocnos; sized for ira_allocnos_num
   entries.  */
static ira_allocno_t *regno_allocnos;
2419 
2420 /* Restore allocno order for REGNO in the regno allocno list.  */
2421 static void
2422 ira_rebuild_regno_allocno_list (int regno)
2423 {
2424   int i, n;
2425   ira_allocno_t a;
2426 
2427   for (n = 0, a = ira_regno_allocno_map[regno];
2428        a != NULL;
2429        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2430     regno_allocnos[n++] = a;
2431   ira_assert (n > 0);
2432   qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2433 	 regno_allocno_order_compare_func);
2434   for (i = 1; i < n; i++)
2435     ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2436   ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2437   ira_regno_allocno_map[regno] = regno_allocnos[0];
2438   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2439     fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2440 }
2441 
/* Propagate info from allocno FROM_A to allocno A.  Used when FROM_A
   is being merged away (its region was removed) so A must absorb its
   accumulated data.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  /* Accumulate conflicts, reference counts, frequencies and
     call-crossing data.  */
  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
 		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));

  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* If spilling FROM_A could be useful, so could spilling A.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  /* Both allocnos must be of the same class; accumulate the cost
     vectors and scalar costs.  */
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
2472 
/* Remove allocnos from loops removed from the allocation
   consideration.  Allocnos of a removed region are either re-homed
   into the nearest surviving ancestor region or merged into that
   ancestor's existing allocno for the same regno.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      /* Walk the regno allocno list, keeping prev_a as the last
	 surviving element for unlinking.  */
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest ancestor that either still has an
		 allocno for this regno or survives removal.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  /* Allocate the sorting scratch array lazily on first
	     use.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  /* Merged live ranges invalidate the global start/finish chains;
     rebuild them.  */
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2549 
/* Remove allocnos from all loops but the root.  Every regno keeps a
   single allocno, registered at the root loop tree node; all others
   are merged into it and finalized.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  /* First pass: move or merge every non-cap, non-root allocno into
     the root region.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  /* The root has no allocno for this regno yet -- re-home
	     this one there.  */
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Propagate only when the parent region has no allocno for the
	 regno; otherwise the data presumably reached the top through
	 that allocno already -- TODO confirm against
	 propagate_allocno_info.  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  /* Sanity checks on caps and parent maps before finalizing.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
 	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  /* Second pass: keep only the allocno registered at the root for
     each regno; finalize everything else.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  /* The accumulated conflict set becomes the plain conflict
	     set of the single surviving allocno.  */
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  /* Merged live ranges invalidate the global start/finish chains;
     rebuild them.  */
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
2621 
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  /* Decide which loop nodes to drop.  */
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  /* Reserve worst-case capacity: every basic block node and every
     loop node may be pushed on these vectors.  */
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
  /* Relink the loop tree without the marked nodes.  */
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  /* Merge or drop the allocnos that belonged to removed regions.  */
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  /* Finally free the removed loop tree nodes themselves.  */
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}
2650 
2651 
2652 
/* At this point true value of allocno attribute bad_spill_p means
   that there is an insn where allocno occurs and where the allocno
   can not be used as memory.  The function updates the attribute, now
   it can be true only for allocnos which can not be used as memory in
   an insn and in whose live ranges there is other allocno deaths.
   Spilling allocnos with true value will not improve the code because
   it will not make other allocnos colorable and additional reloads
   for the corresponding pseudo will be generated in reload pass for
   each insn it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  /* For each allocno class, collect the program points at which some
     allocno of that class dies (its live range finishes).  */
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  /* Keep bad_spill_p set only on allocnos none of whose ranges
     contain an interior death point of another same-class allocno;
     if one does, spilling could make others colorable, so the flag
     is cleared.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      /* Look for a death point strictly inside the range.  */
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  /* R non-NULL here means an interior death point was
	     found.  */
	  if (r != NULL)
	    {
	      ALLOCNO_BAD_SPILL_P (a) = false;
	      break;
	    }
	}
    }
  /* Release the per-class bitmaps.  */
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
2722 
2723 
2724 
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  /* Initialize each object's [min, max] from its own range list.
     The code takes the finish from the head and the start from the
     tail, i.e. it relies on the list being ordered latest-range
     first.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  /* Walk to the last range to get the minimal start.  */
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  /* Widen the ranges of caps and parent-region allocnos by those of
     the allocnos they cover; the regno allocno list runs from inner
     regions outward, so children are handled before their parents
     are consumed.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		/* Capped allocno: widen the whole cap chain
		   instead of the parent allocno.  */
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Every object must have ended up with min and max inside
     [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
2804 
2805 /* Sort allocnos according to their live ranges.  Allocnos with
2806    smaller allocno class are put first unless we use priority
2807    coloring.  Allocnos with the same class are ordered according
2808    their start (min).  Allocnos with the same start are ordered
2809    according their finish (max).  */
2810 static int
2811 object_range_compare_func (const void *v1p, const void *v2p)
2812 {
2813   int diff;
2814   ira_object_t obj1 = *(const ira_object_t *) v1p;
2815   ira_object_t obj2 = *(const ira_object_t *) v2p;
2816   ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2817   ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2818 
2819   if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2820     return diff;
2821   if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2822      return diff;
2823   return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2824 }
2825 
2826 /* Sort ira_object_id_map and set up conflict id of allocnos.  */
2827 static void
2828 sort_conflict_id_map (void)
2829 {
2830   int i, num;
2831   ira_allocno_t a;
2832   ira_allocno_iterator ai;
2833 
2834   num = 0;
2835   FOR_EACH_ALLOCNO (a, ai)
2836     {
2837       ira_allocno_object_iterator oi;
2838       ira_object_t obj;
2839 
2840       FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2841 	ira_object_id_map[num++] = obj;
2842     }
2843   if (num > 1)
2844     qsort (ira_object_id_map, num, sizeof (ira_object_t),
2845 	   object_range_compare_func);
2846   for (i = 0; i < num; i++)
2847     {
2848       ira_object_t obj = ira_object_id_map[i];
2849 
2850       gcc_assert (obj != NULL);
2851       OBJECT_CONFLICT_ID (obj) = i;
2852     }
2853   for (i = num; i < ira_objects_num; i++)
2854     ira_object_id_map[i] = NULL;
2855 }
2856 
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  After this function, every object
   conflicting with object OBJ has a conflict id inside
   [OBJECT_MIN (OBJ), OBJECT_MAX (OBJ)]; the interval is conservative
   and lets later conflict queries reject non-conflicting pairs
   quickly.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* LIVE_RANGE_MIN[I] saves the original range start of the object
     with conflict id I, because OBJECT_MIN is overwritten with a
     conflict id in the first pass below.  */
  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  /* ACLASS < 0 marks that no object has been processed yet.  */
  aclass = -1;
  first_not_finished = -1;
  /* Forward pass over objects sorted by range start: compute each
     object's minimal conflict id.  FIRST_NOT_FINISHED is the smallest
     id whose range has not finished before the current object's range
     starts.  */
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocnos with smaller ids will
	     be also skipped because of the secondary sorting by the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  /* LAST_LIVED[P] will hold the index (in the downward walk below) of
     the last processed object whose range may cover program point P,
     or -1 if none.  */
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  /* Backward pass: compute each object's maximal conflict id.  */
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  /* First processed object: initialize the scratch array.  */
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* When filling LAST_LIVED we may go beyond the current object's
	 range finish to recognize intersections quickly: if the range
	 of a subsequently processed object (which has a smaller
	 conflict id) finishes beyond this object's range finish, the
	 two ranges definitely intersect, because objects with bigger
	 conflict ids have range starts not smaller than objects with
	 smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra conflicts in
     subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  /* First compute the conflict id span of all subobjects 0 of
     multi-object allocnos ...  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  /* ... then widen each such subobject's [min, max] to cover that
     span.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
2981 
2982 
2983 
2984 static void
2985 create_caps (void)
2986 {
2987   ira_allocno_t a;
2988   ira_allocno_iterator ai;
2989   ira_loop_tree_node_t loop_tree_node;
2990 
2991   FOR_EACH_ALLOCNO (a, ai)
2992     {
2993       if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2994 	continue;
2995       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2996 	create_cap_allocno (a);
2997       else if (ALLOCNO_CAP (a) == NULL)
2998 	{
2999 	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3000 	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
3001 	    create_cap_allocno (a);
3002 	}
3003     }
3004 }
3005 
3006 
3007 
/* This page contains code transforming the more-than-one-region
   internal representation (IR) into the one-region IR which is
   necessary for reload.  This transformation is called IR flattening.
   We might just rebuild the IR for one region, but we don't do it
   because that takes a lot of time.  */
3013 
/* Map: regno -> the allocno which will finally represent the regno in
   the flattened one-region IR.  Allocated and freed by
   ira_flattening.  */
static ira_allocno_t *regno_top_level_allocno_map;
3017 
3018 /* Find the allocno that corresponds to A at a level one higher up in the
3019    loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
3020 ira_allocno_t
3021 ira_parent_allocno (ira_allocno_t a)
3022 {
3023   ira_loop_tree_node_t parent;
3024 
3025   if (ALLOCNO_CAP (a) != NULL)
3026     return NULL;
3027 
3028   parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3029   if (parent == NULL)
3030     return NULL;
3031 
3032   return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3033 }
3034 
3035 /* Find the allocno that corresponds to A at a level one higher up in the
3036    loop tree.  If ALLOCNO_CAP is set for A, return that.  */
3037 ira_allocno_t
3038 ira_parent_or_cap_allocno (ira_allocno_t a)
3039 {
3040   if (ALLOCNO_CAP (a) != NULL)
3041     return ALLOCNO_CAP (a);
3042 
3043   return ira_parent_allocno (a);
3044 }
3045 
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up the loop tree looking for the nearest ancestor
	 allocno of REGNO that survives flattening and is a
	 destination of a removed store (mem_optimized_dest_p).  The
	 walk also stops when an ancestor has no allocno for REGNO at
	 all.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Merge A's live ranges and hard register conflicts into the
	 found destination allocno.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      /* Accumulate call-related and register-pressure attributes as
	 well.  */
      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
 			ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
3098 
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  /* Seed the total conflict hard reg sets (and stack-reg attributes)
     from the per-region ones before any merging below.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they will never
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      /* A top-level allocno: it survives flattening and
		 represents its pseudo from now on.  */
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      /* Same pseudo as the parent: merge A into the parent
		 allocno; A itself will be removed further below.  */
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  new_pseudos_p = true;
	  /* A was given a new pseudo: subtract its attribute
	     contribution from all ancestor allocnos into which it was
	     propagated when the region structure was built.  */
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      /* Sweep all program points; OBJECTS_LIVE tracks the objects
	 whose live ranges cover the current point.  */
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      /* The newly started object conflicts with everything
		 currently live (in an intersecting class).  */
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* The copy was generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		     && (REGNO (allocno_emit_reg (second))
			 == REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  /* Retarget the copy at the surviving top-level allocnos.  */
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      /* A surviving allocno: attach it to the root region.  */
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
3373 
3374 
3375 
#ifdef ENABLE_IRA_CHECKING
/* Sanity-check allocno creation: every allocno below the root of the
   loop tree must have an allocno or a cap on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t allocno;
  ira_allocno_iterator it;

  FOR_EACH_ALLOCNO (allocno, it)
    {
      ira_loop_tree_node_t node = ALLOCNO_LOOP_TREE_NODE (allocno);

      /* Every allocno must be registered in its own node.  */
      ira_assert (bitmap_bit_p (node->all_allocnos, ALLOCNO_NUM (allocno)));
      if (node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
	/* A cap member must itself be represented by a cap.  */
	ira_assert (ALLOCNO_CAP (allocno) != NULL);
      else if (ALLOCNO_CAP (allocno) == NULL)
	/* Without a cap, the pseudo must live across the region
	   border and have an allocno in the parent node.  */
	ira_assert (node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (allocno)] != NULL
		    && bitmap_bit_p (node->border_allocnos,
				     ALLOCNO_NUM (allocno)));
    }
}
#endif
3403 
3404 /* Identify allocnos which prefer a register class with a single hard register.
3405    Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3406    less likely to use the preferred singleton register.  */
3407 static void
3408 update_conflict_hard_reg_costs (void)
3409 {
3410   ira_allocno_t a;
3411   ira_allocno_iterator ai;
3412   int i, index, min;
3413 
3414   FOR_EACH_ALLOCNO (a, ai)
3415     {
3416       reg_class_t aclass = ALLOCNO_CLASS (a);
3417       reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3418       int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3419       if (singleton < 0)
3420 	continue;
3421       index = ira_class_hard_reg_index[(int) aclass][singleton];
3422       if (index < 0)
3423 	continue;
3424       if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3425 	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3426 	continue;
3427       min = INT_MAX;
3428       for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3429 	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3430 	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3431 	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
3432       if (min == INT_MAX)
3433 	continue;
3434       ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3435 				  aclass, 0);
3436       ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3437 	-= min - ALLOCNO_CLASS_COST (a);
3438     }
3439 }
3440 
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: propagate allocno info up the loop tree
	 and build caps for allocnos local to sub-regions.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      /* Gather statistics for the dump: N -- total conflicts, NR --
	 total live ranges, NR_BIG -- allocnos with more than one
	 object.  */
      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
3535 
/* Release all data created by function ira_build: the loop tree,
   preferences, copies, allocnos, cost vectors and allocno live
   ranges.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}
3547