xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/ira-build.c (revision 413d532bcc3f62d122e56d92e13ac64825a40baf)
1 /* Building internal representation for IRA.
2    Copyright (C) 2006, 2007, 2008, 2009
3    Free Software Foundation, Inc.
4    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "hard-reg-set.h"
32 #include "basic-block.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "toplev.h"
36 #include "params.h"
37 #include "df.h"
38 #include "output.h"
39 #include "reload.h"
40 #include "sparseset.h"
41 #include "ira-int.h"
42 
43 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
44 				     ira_loop_tree_node_t);
45 
46 /* The root of the loop tree corresponding to the whole function.  */
47 ira_loop_tree_node_t ira_loop_tree_root;
48 
49 /* Height of the loop tree.  */
50 int ira_loop_tree_height;
51 
52 /* All nodes representing basic blocks are referred through the
53    following array.  We can not use basic block member `aux' for this
54    because it is used for insertion of insns on edges.  */
55 ira_loop_tree_node_t ira_bb_nodes;
56 
57 /* All nodes representing loops are referred through the following
58    array.  */
59 ira_loop_tree_node_t ira_loop_nodes;
60 
61 /* Map regno -> allocnos with given regno (see comments for
62    allocno member `next_regno_allocno').  */
63 ira_allocno_t *ira_regno_allocno_map;
64 
65 /* Array of references to all allocnos.  The order number of the
66    allocno corresponds to the index in the array.  Removed allocnos
67    have NULL element value.  */
68 ira_allocno_t *ira_allocnos;
69 
70 /* Size of the previous array.  */
71 int ira_allocnos_num;
72 
73 /* Map conflict id -> allocno with given conflict id (see comments for
74    allocno member `conflict_id').  */
75 ira_allocno_t *ira_conflict_id_allocno_map;
76 
77 /* Array of references to all copies.  The order number of the copy
78    corresponds to the index in the array.  Removed copies have NULL
79    element value.  */
80 ira_copy_t *ira_copies;
81 
82 /* Size of the previous array.  */
83 int ira_copies_num;
84 
85 
86 
87 /* LAST_BASIC_BLOCK before generating additional insns because of live
88    range splitting.  Emitting insns on a critical edge creates a new
89    basic block.  */
90 static int last_basic_block_before_change;
91 
92 /* The following function allocates the loop tree nodes.  If LOOPS_P
93    is FALSE, the nodes corresponding to the loops (except the root,
94    which corresponds to the whole function) will not be allocated, but
95    nodes will still be allocated for basic blocks.  */
96 static void
97 create_loop_tree_nodes (bool loops_p)
98 {
99   unsigned int i, j;
100   int max_regno;
101   bool skip_p;
102   edge_iterator ei;
103   edge e;
104   VEC (edge, heap) *edges;
105   loop_p loop;
106 
107   ira_bb_nodes
108     = ((struct ira_loop_tree_node *)
109        ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
110   last_basic_block_before_change = last_basic_block;
111   for (i = 0; i < (unsigned int) last_basic_block; i++)
112     {
113       ira_bb_nodes[i].regno_allocno_map = NULL;
114       memset (ira_bb_nodes[i].reg_pressure, 0,
115 	      sizeof (ira_bb_nodes[i].reg_pressure));
116       ira_bb_nodes[i].all_allocnos = NULL;
117       ira_bb_nodes[i].modified_regnos = NULL;
118       ira_bb_nodes[i].border_allocnos = NULL;
119       ira_bb_nodes[i].local_copies = NULL;
120     }
121   ira_loop_nodes = ((struct ira_loop_tree_node *)
122 		    ira_allocate (sizeof (struct ira_loop_tree_node)
123 				  * VEC_length (loop_p, ira_loops.larray)));
124   max_regno = max_reg_num ();
125   for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
126     {
127       if (loop != ira_loops.tree_root)
128 	{
129 	  ira_loop_nodes[i].regno_allocno_map = NULL;
130 	  if (! loops_p)
131 	    continue;
	  /* Skip loops whose header is entered (other than from the
	     latch) or which are exited through an abnormal critical
	     edge; such loops do not become allocation regions.  */
132 	  skip_p = false;
133 	  FOR_EACH_EDGE (e, ei, loop->header->preds)
134 	    if (e->src != loop->latch
135 		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
136 	      {
137 		skip_p = true;
138 		break;
139 	      }
140 	  if (skip_p)
141 	    continue;
142 	  edges = get_loop_exit_edges (loop);
143 	  for (j = 0; VEC_iterate (edge, edges, j, e); j++)
144 	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
145 	      {
146 		skip_p = true;
147 		break;
148 	      }
149 	  VEC_free (edge, heap, edges);
150 	  if (skip_p)
151 	    continue;
152 	}
      /* A non-NULL regno_allocno_map marks the node as representing an
	 allocation region; allocate its map and bitmaps.  */
153       ira_loop_nodes[i].regno_allocno_map
154 	= (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
155       memset (ira_loop_nodes[i].regno_allocno_map, 0,
156 	      sizeof (ira_allocno_t) * max_regno);
157       memset (ira_loop_nodes[i].reg_pressure, 0,
158 	      sizeof (ira_loop_nodes[i].reg_pressure));
159       ira_loop_nodes[i].all_allocnos = ira_allocate_bitmap ();
160       ira_loop_nodes[i].modified_regnos = ira_allocate_bitmap ();
161       ira_loop_nodes[i].border_allocnos = ira_allocate_bitmap ();
162       ira_loop_nodes[i].local_copies = ira_allocate_bitmap ();
163     }
164 }
165 
166 /* Return TRUE if there is more than one allocation region, i.e. if
167    some loop other than the root got its own loop tree node.  */
168 static bool
169 more_one_region_p (void)
170 {
171   unsigned int i;
172   loop_p loop;
173 
174   for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
175     if (ira_loop_nodes[i].regno_allocno_map != NULL
176 	&& ira_loop_tree_root != &ira_loop_nodes[i])
177       return true;
178   return false;
179 }
180 
181 /* Free the data of the loop tree node LOOP.  A node whose
182    regno_allocno_map is NULL was never set up and owns nothing.  */
183 static void
184 finish_loop_tree_node (ira_loop_tree_node_t loop)
185 {
186   if (loop->regno_allocno_map != NULL)
187     {
188       ira_assert (loop->bb == NULL);
189       ira_free_bitmap (loop->local_copies);
190       ira_free_bitmap (loop->border_allocnos);
191       ira_free_bitmap (loop->modified_regnos);
192       ira_free_bitmap (loop->all_allocnos);
193       ira_free (loop->regno_allocno_map);
194       loop->regno_allocno_map = NULL;
195     }
196 }
196 
197 /* Free all loop tree nodes: the loop nodes and then the basic block
198    nodes (using the basic block count recorded before any live range
199    splitting changed it).  */
199 static void
200 finish_loop_tree_nodes (void)
201 {
202   unsigned int i;
203   loop_p loop;
204 
205   for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
206     finish_loop_tree_node (&ira_loop_nodes[i]);
207   ira_free (ira_loop_nodes);
208   for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
209     {
210       if (ira_bb_nodes[i].local_copies != NULL)
211 	ira_free_bitmap (ira_bb_nodes[i].local_copies);
212       if (ira_bb_nodes[i].border_allocnos != NULL)
213 	ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
214       if (ira_bb_nodes[i].modified_regnos != NULL)
215 	ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
216       if (ira_bb_nodes[i].all_allocnos != NULL)
217 	ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
218       if (ira_bb_nodes[i].regno_allocno_map != NULL)
219 	ira_free (ira_bb_nodes[i].regno_allocno_map);
220     }
221   ira_free (ira_bb_nodes);
222 }
222 
223 
224 
225 /* The following recursive function adds LOOP to the loop tree
226    hierarchy.  LOOP is added only once.  Outer loops are added first,
227    so each loop's nearest already-added ancestor is its parent.  */
227 static void
228 add_loop_to_tree (struct loop *loop)
229 {
230   struct loop *parent;
231   ira_loop_tree_node_t loop_node, parent_node;
232 
233   /* We can not use loop node access macros here because of potential
234      checking and because the nodes are not initialized enough
235      yet.  */
236   if (loop_outer (loop) != NULL)
237     add_loop_to_tree (loop_outer (loop));
238   if (ira_loop_nodes[loop->num].regno_allocno_map != NULL
239       && ira_loop_nodes[loop->num].children == NULL)
240     {
241       /* We have not added loop node to the tree yet.  */
242       loop_node = &ira_loop_nodes[loop->num];
243       loop_node->loop = loop;
244       loop_node->bb = NULL;
      /* Find the nearest enclosing loop that itself has a node
	 (some loops may have been skipped as regions).  */
245       for (parent = loop_outer (loop);
246 	   parent != NULL;
247 	   parent = loop_outer (parent))
248 	if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
249 	  break;
250       if (parent == NULL)
251 	{
252 	  loop_node->next = NULL;
253 	  loop_node->subloop_next = NULL;
254 	  loop_node->parent = NULL;
255 	}
256       else
257 	{
258 	  parent_node = &ira_loop_nodes[parent->num];
259 	  loop_node->next = parent_node->children;
260 	  parent_node->children = loop_node;
261 	  loop_node->subloop_next = parent_node->subloops;
262 	  parent_node->subloops = loop_node;
263 	  loop_node->parent = parent_node;
264 	}
265     }
266 }
267 
268 /* The following recursive function sets up levels of nodes of the
269    tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
270    The function returns maximal value of level in the tree + 1.  */
271 static int
272 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
273 {
274   int height, max_height;
275   ira_loop_tree_node_t subloop_node;
276 
277   ira_assert (loop_node->bb == NULL);
278   loop_node->level = level;
279   max_height = level + 1;
280   for (subloop_node = loop_node->subloops;
281        subloop_node != NULL;
282        subloop_node = subloop_node->subloop_next)
283     {
284       ira_assert (subloop_node->bb == NULL);
285       height = setup_loop_tree_level (subloop_node, level + 1);
286       if (height > max_height)
287 	max_height = height;
288     }
289   return max_height;
290 }
291 
292 /* Create the loop tree.  The algorithm is designed to provide correct
293    order of loops (they are ordered by their last loop BB) and basic
294    blocks in the chain formed by member next.  */
295 static void
296 form_loop_tree (void)
297 {
298   unsigned int i;
299   basic_block bb;
300   struct loop *parent;
301   ira_loop_tree_node_t bb_node, loop_node;
302   loop_p loop;
303 
304   /* We can not use loop/bb node access macros because of potential
305      checking and because the nodes are not initialized enough
306      yet.  */
307   for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
308      if (ira_loop_nodes[i].regno_allocno_map != NULL)
309        {
310 	 ira_loop_nodes[i].children = NULL;
311 	 ira_loop_nodes[i].subloops = NULL;
312        }
313   FOR_EACH_BB (bb)
314     {
315       bb_node = &ira_bb_nodes[bb->index];
316       bb_node->bb = bb;
317       bb_node->loop = NULL;
318       bb_node->subloops = NULL;
319       bb_node->children = NULL;
320       bb_node->subloop_next = NULL;
321       bb_node->next = NULL;
      /* Attach BB to the nearest enclosing loop that has a node of
	 its own (skipped loops have no regno_allocno_map).  */
322       for (parent = bb->loop_father;
323 	   parent != NULL;
324 	   parent = loop_outer (parent))
325 	if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
326 	  break;
327       add_loop_to_tree (parent);
328       loop_node = &ira_loop_nodes[parent->num];
329       bb_node->next = loop_node->children;
330       bb_node->parent = loop_node;
331       loop_node->children = bb_node;
332     }
333   ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (ira_loops.tree_root->num);
334   ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
335   ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
336 }
337 
338 
339 
340 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
341    tree nodes.  The maps are reallocated for the current max_reg_num
342    and repopulated from the existing allocnos (caps excluded).  */
342 static void
343 rebuild_regno_allocno_maps (void)
344 {
345   unsigned int l;
346   int max_regno, regno;
347   ira_allocno_t a;
348   ira_loop_tree_node_t loop_tree_node;
349   loop_p loop;
350   ira_allocno_iterator ai;
351 
352   max_regno = max_reg_num ();
353   for (l = 0; VEC_iterate (loop_p, ira_loops.larray, l, loop); l++)
354     if (ira_loop_nodes[l].regno_allocno_map != NULL)
355       {
356 	ira_free (ira_loop_nodes[l].regno_allocno_map);
357 	ira_loop_nodes[l].regno_allocno_map
358 	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
359 					    * max_regno);
360 	memset (ira_loop_nodes[l].regno_allocno_map, 0,
361 		sizeof (ira_allocno_t) * max_regno);
362       }
363   ira_free (ira_regno_allocno_map);
364   ira_regno_allocno_map
365     = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
366   memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
367   FOR_EACH_ALLOCNO (a, ai)
368     {
369       if (ALLOCNO_CAP_MEMBER (a) != NULL)
370 	/* Caps are not in the regno allocno maps.  */
371 	continue;
372       regno = ALLOCNO_REGNO (a);
373       loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
374       ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
375       ira_regno_allocno_map[regno] = a;
376       if (loop_tree_node->regno_allocno_map[regno] == NULL)
377 	/* Remember that we can create temporary allocnos to break
378 	   cycles in register shuffle.  */
379 	loop_tree_node->regno_allocno_map[regno] = a;
380     }
381 }
382 
383 
384 
385 /* Pools for allocnos and allocno live ranges.  */
386 static alloc_pool allocno_pool, allocno_live_range_pool;
387 
388 /* Vec containing references to all created allocnos.  It is a
389    container of array allocnos.  */
390 static VEC(ira_allocno_t,heap) *allocno_vec;
391 
392 /* Vec containing references to all created allocnos.  It is a
393    container of ira_conflict_id_allocno_map.  */
394 static VEC(ira_allocno_t,heap) *ira_conflict_id_allocno_map_vec;
395 
396 /* Initialize data concerning allocnos: the allocation pools, the
397    container vecs, and the (zeroed) regno -> allocno map.  */
397 static void
398 initiate_allocnos (void)
399 {
400   allocno_live_range_pool
401     = create_alloc_pool ("allocno live ranges",
402 			 sizeof (struct ira_allocno_live_range), 100);
403   allocno_pool
404     = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
405   allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
406   ira_allocnos = NULL;
407   ira_allocnos_num = 0;
408   ira_conflict_id_allocno_map_vec
409     = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
410   ira_conflict_id_allocno_map = NULL;
411   ira_regno_allocno_map
412     = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
413   memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
414 }
415 
416 /* Create and return the allocno corresponding to REGNO in
417    LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
418    same regno if CAP_P is FALSE.  The new allocno is also registered
419    in ira_allocnos and ira_conflict_id_allocno_map.  */
419 ira_allocno_t
420 ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
421 {
422   ira_allocno_t a;
423 
424   a = (ira_allocno_t) pool_alloc (allocno_pool);
425   ALLOCNO_REGNO (a) = regno;
426   ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
427   if (! cap_p)
428     {
429       ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
430       ira_regno_allocno_map[regno] = a;
431       if (loop_tree_node->regno_allocno_map[regno] == NULL)
432 	/* Remember that we can create temporary allocnos to break
433 	   cycles in register shuffle on region borders (see
434 	   ira-emit.c).  */
435 	loop_tree_node->regno_allocno_map[regno] = a;
436     }
437   ALLOCNO_CAP (a) = NULL;
438   ALLOCNO_CAP_MEMBER (a) = NULL;
439   ALLOCNO_NUM (a) = ira_allocnos_num;
440   bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
441   ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = NULL;
442   ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
443   COPY_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a), ira_no_alloc_regs);
444   COPY_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), ira_no_alloc_regs);
445   ALLOCNO_NREFS (a) = 0;
446   ALLOCNO_FREQ (a) = 0;
447   ALLOCNO_HARD_REGNO (a) = -1;
448   ALLOCNO_CALL_FREQ (a) = 0;
449   ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
450 #ifdef STACK_REGS
451   ALLOCNO_NO_STACK_REG_P (a) = false;
452   ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
453 #endif
454   ALLOCNO_MEM_OPTIMIZED_DEST (a) = NULL;
455   ALLOCNO_MEM_OPTIMIZED_DEST_P (a) = false;
456   ALLOCNO_SOMEWHERE_RENAMED_P (a) = false;
457   ALLOCNO_CHILD_RENAMED_P (a) = false;
458   ALLOCNO_DONT_REASSIGN_P (a) = false;
459   ALLOCNO_BAD_SPILL_P (a) = false;
460   ALLOCNO_IN_GRAPH_P (a) = false;
461   ALLOCNO_ASSIGNED_P (a) = false;
462   ALLOCNO_MAY_BE_SPILLED_P (a) = false;
463   ALLOCNO_SPLAY_REMOVED_P (a) = false;
464   ALLOCNO_CONFLICT_VEC_P (a) = false;
465   ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
466   ALLOCNO_COPIES (a) = NULL;
467   ALLOCNO_HARD_REG_COSTS (a) = NULL;
468   ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
469   ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
470   ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
471   ALLOCNO_LEFT_CONFLICTS_SIZE (a) = -1;
472   ALLOCNO_COVER_CLASS (a) = NO_REGS;
473   ALLOCNO_UPDATED_COVER_CLASS_COST (a) = 0;
474   ALLOCNO_COVER_CLASS_COST (a) = 0;
475   ALLOCNO_MEMORY_COST (a) = 0;
476   ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
477   ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
478   ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = NULL;
479   ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
480   ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
481   ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
482   ALLOCNO_LIVE_RANGES (a) = NULL;
483   ALLOCNO_MIN (a) = INT_MAX;
484   ALLOCNO_MAX (a) = -1;
485   ALLOCNO_CONFLICT_ID (a) = ira_allocnos_num;
486   VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
487   ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
488   ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
489   VEC_safe_push (ira_allocno_t, heap, ira_conflict_id_allocno_map_vec, a);
490   ira_conflict_id_allocno_map
491     = VEC_address (ira_allocno_t, ira_conflict_id_allocno_map_vec);
492   return a;
493 }
494 
495 /* Set up cover class for A and update its conflict hard registers:
496    hard registers outside COVER_CLASS are added to both conflict
497    sets, excluding them from allocation.  */
497 void
498 ira_set_allocno_cover_class (ira_allocno_t a, enum reg_class cover_class)
499 {
500   ALLOCNO_COVER_CLASS (a) = cover_class;
501   IOR_COMPL_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
502 			  reg_class_contents[cover_class]);
503   IOR_COMPL_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
504 			  reg_class_contents[cover_class]);
505 }
505 
506 /* Return TRUE if the conflict vector with NUM elements is more
507    profitable than conflict bit vector for A.  */
508 bool
509 ira_conflict_vector_profitable_p (ira_allocno_t a, int num)
510 {
511   int nw;
512 
513   if (ALLOCNO_MAX (a) < ALLOCNO_MIN (a))
514     /* We prefer bit vector in such case because it does not result in
515        allocation.  */
516     return false;
517 
518   nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS) / IRA_INT_BITS;
  /* Vector size is weighted by 2, bit-vector size by 3; the bias
     presumably allows for later vector growth (it is reallocated at
     ~1.5x in add_to_allocno_conflicts) -- see that function.  */
519   return (2 * sizeof (ira_allocno_t) * (num + 1)
520 	  < 3 * nw * sizeof (IRA_INT_TYPE));
521 }
522 
523 /* Allocate and initialize the conflict vector of A for NUM
524    conflicting allocnos.  The vector is NULL-terminated.  */
525 void
526 ira_allocate_allocno_conflict_vec (ira_allocno_t a, int num)
527 {
528   int size;
529   ira_allocno_t *vec;
530 
531   ira_assert (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) == NULL);
532   num++; /* for NULL end marker  */
533   size = sizeof (ira_allocno_t) * num;
534   ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = ira_allocate (size);
535   vec = (ira_allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a);
536   vec[0] = NULL;
537   ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
538   ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a) = size;
539   ALLOCNO_CONFLICT_VEC_P (a) = true;
540 }
541 
542 /* Allocate and initialize (zero) the conflict bit vector of A, sized
543    to cover conflict ids from ALLOCNO_MIN to ALLOCNO_MAX.  */
544 static void
545 allocate_allocno_conflict_bit_vec (ira_allocno_t a)
546 {
547   unsigned int size;
548 
549   ira_assert (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) == NULL);
550   size = ((ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS)
551 	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
552   ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = ira_allocate (size);
553   memset (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a), 0, size);
554   ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a) = size;
555   ALLOCNO_CONFLICT_VEC_P (a) = false;
556 }
556 
557 /* Allocate and initialize the conflict vector or conflict bit vector
558    of A for NUM conflicting allocnos, whichever is more profitable.  */
559 void
560 ira_allocate_allocno_conflicts (ira_allocno_t a, int num)
561 {
562   if (ira_conflict_vector_profitable_p (a, num))
563     ira_allocate_allocno_conflict_vec (a, num);
564   else
565     allocate_allocno_conflict_bit_vec (a);
566 }
567 
568 /* Add A2 to the conflicts of A1, growing A1's conflict vector or
569    widening its conflict bit vector as needed.  */
570 static void
571 add_to_allocno_conflicts (ira_allocno_t a1, ira_allocno_t a2)
572 {
573   int num;
574   unsigned int size;
575 
576   if (ALLOCNO_CONFLICT_VEC_P (a1))
577     {
578       ira_allocno_t *vec;
579 
      /* num counts the new element plus the NULL end marker.  */
580       num = ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1) + 2;
581       if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1)
582 	  >=  num * sizeof (ira_allocno_t))
583 	vec = (ira_allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1);
584       else
585 	{
	  /* Grow the vector with ~1.5x headroom.  */
586 	  size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
587 	  vec = (ira_allocno_t *) ira_allocate (size);
588 	  memcpy (vec, ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1),
589 		  sizeof (ira_allocno_t) * ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1));
590 	  ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1));
591 	  ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1) = vec;
592 	  ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) = size;
593 	}
594       vec[num - 2] = a2;
595       vec[num - 1] = NULL;
596       ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1)++;
597     }
598   else
599     {
600       int nw, added_head_nw, id;
601       IRA_INT_TYPE *vec;
602 
603       id = ALLOCNO_CONFLICT_ID (a2);
604       vec = (IRA_INT_TYPE *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1);
605       if (ALLOCNO_MIN (a1) > id)
606 	{
607 	  /* Expand head of the bit vector.  */
608 	  added_head_nw = (ALLOCNO_MIN (a1) - id - 1) / IRA_INT_BITS + 1;
609 	  nw = (ALLOCNO_MAX (a1) - ALLOCNO_MIN (a1)) / IRA_INT_BITS + 1;
610 	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
611 	  if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) >= size)
612 	    {
	      /* Enough room: shift existing words up and zero the
		 new head in place.  */
613 	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
614 		       vec, nw * sizeof (IRA_INT_TYPE));
615 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
616 	    }
617 	  else
618 	    {
	      /* Reallocate with ~1.5x headroom, copying the old words
		 after the new zeroed head.  */
619 	      size
620 		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
621 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
622 	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
623 		      ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1),
624 		      nw * sizeof (IRA_INT_TYPE));
625 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
626 	      memset ((char *) vec
627 		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
628 		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
629 	      ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1));
630 	      ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1) = vec;
631 	      ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) = size;
632 	    }
633 	  ALLOCNO_MIN (a1) -= added_head_nw * IRA_INT_BITS;
634 	}
635       else if (ALLOCNO_MAX (a1) < id)
636 	{
637 	  nw = (id - ALLOCNO_MIN (a1)) / IRA_INT_BITS + 1;
638 	  size = nw * sizeof (IRA_INT_TYPE);
639 	  if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) < size)
640 	    {
641 	      /* Expand tail of the bit vector.  */
642 	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
643 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
644 	      memcpy (vec, ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1),
645 		      ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1));
646 	      memset ((char *) vec + ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1),
647 		      0, size - ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1));
648 	      ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1));
649 	      ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1) = vec;
650 	      ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) = size;
651 	    }
652 	  ALLOCNO_MAX (a1) = id;
653 	}
654       SET_ALLOCNO_SET_BIT (vec, id, ALLOCNO_MIN (a1), ALLOCNO_MAX (a1));
655     }
656 }
656 
657 /* Add A1 to the conflicts of A2 and vice versa.  */
658 void
659 ira_add_allocno_conflict (ira_allocno_t a1, ira_allocno_t a2)
660 {
661   add_to_allocno_conflicts (a1, a2);
662   add_to_allocno_conflicts (a2, a1);
663 }
664 
665 /* Clear all conflicts of allocno A without freeing or shrinking its
666    conflict array.  */
667 static void
668 clear_allocno_conflicts (ira_allocno_t a)
669 {
670   if (ALLOCNO_CONFLICT_VEC_P (a))
671     {
672       ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
673       ((ira_allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a))[0] = NULL;
674     }
675   else if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a) != 0)
676     {
677       int nw;
678 
679       nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a)) / IRA_INT_BITS + 1;
680       memset (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a), 0,
681 	      nw * sizeof (IRA_INT_TYPE));
682     }
683 }
683 
684 /* The array used to find duplications in conflict vectors of
685    allocnos.  */
686 static int *allocno_conflict_check;
687 
688 /* The value used to mark allocno presence in the conflict vector of
689    the current allocno.  */
690 static int curr_allocno_conflict_check_tick;
691 
692 /* Remove duplications in conflict vector of A, compacting it in
693    place.  Uses the tick scheme above to avoid clearing the check
694    array between allocnos.  */
694 static void
695 compress_allocno_conflict_vec (ira_allocno_t a)
696 {
697   ira_allocno_t *vec, conflict_a;
698   int i, j;
699 
700   ira_assert (ALLOCNO_CONFLICT_VEC_P (a));
701   vec = (ira_allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a);
702   curr_allocno_conflict_check_tick++;
703   for (i = j = 0; (conflict_a = vec[i]) != NULL; i++)
704     {
705       if (allocno_conflict_check[ALLOCNO_NUM (conflict_a)]
706 	  != curr_allocno_conflict_check_tick)
707 	{
708 	  allocno_conflict_check[ALLOCNO_NUM (conflict_a)]
709 	    = curr_allocno_conflict_check_tick;
710 	  vec[j++] = conflict_a;
711 	}
712     }
713   ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = j;
714   vec[j] = NULL;
715 
716 /* Remove duplications in conflict vectors of all allocnos that use
717    the vector representation.  */
718 static void
719 compress_conflict_vecs (void)
720 {
721   ira_allocno_t a;
722   ira_allocno_iterator ai;
723 
724   allocno_conflict_check
725     = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
726   memset (allocno_conflict_check, 0, sizeof (int) * ira_allocnos_num);
727   curr_allocno_conflict_check_tick = 0;
728   FOR_EACH_ALLOCNO (a, ai)
729     if (ALLOCNO_CONFLICT_VEC_P (a))
730       compress_allocno_conflict_vec (a);
731   ira_free (allocno_conflict_check);
732 }
732 
733 /* This recursive function outputs allocno A to ira_dump_file and, if
734    A is a cap, also outputs its member allocno.  */
735 void
736 ira_print_expanded_allocno (ira_allocno_t a)
737 {
738   basic_block bb;
739 
740   fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
741   if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
742     fprintf (ira_dump_file, ",b%d", bb->index);
743   else
744     fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
745   if (ALLOCNO_CAP_MEMBER (a) != NULL)
746     {
747       fprintf (ira_dump_file, ":");
748       ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
749     }
750   fprintf (ira_dump_file, ")");
751 }
752 
753 /* Create and return the cap representing allocno A in the parent
754    loop, copying A's mode, cover class, costs, conflict hard register
755    sets and frequencies into the cap.  */
755 static ira_allocno_t
756 create_cap_allocno (ira_allocno_t a)
757 {
758   ira_allocno_t cap;
759   ira_loop_tree_node_t parent;
760   enum reg_class cover_class;
761 
762   ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
763 	      && ALLOCNO_NEXT_COALESCED_ALLOCNO (a) == a);
764   parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
765   cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
766   ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
767   cover_class = ALLOCNO_COVER_CLASS (a);
768   ira_set_allocno_cover_class (cap, cover_class);
769   ALLOCNO_AVAILABLE_REGS_NUM (cap) = ALLOCNO_AVAILABLE_REGS_NUM (a);
770   ALLOCNO_CAP_MEMBER (cap) = a;
771   ALLOCNO_CAP (a) = cap;
772   ALLOCNO_COVER_CLASS_COST (cap) = ALLOCNO_COVER_CLASS_COST (a);
773   ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
774   ira_allocate_and_copy_costs
775     (&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
776   ira_allocate_and_copy_costs
777     (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
778      ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
779   ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
780   ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
781   ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
782   ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
783   IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (cap),
784 		    ALLOCNO_CONFLICT_HARD_REGS (a));
785   IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (cap),
786 		    ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
787   ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
788 #ifdef STACK_REGS
789   ALLOCNO_NO_STACK_REG_P (cap) = ALLOCNO_NO_STACK_REG_P (a);
790   ALLOCNO_TOTAL_NO_STACK_REG_P (cap) = ALLOCNO_TOTAL_NO_STACK_REG_P (a);
791 #endif
792   if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
793     {
794       fprintf (ira_dump_file, "    Creating cap ");
795       ira_print_expanded_allocno (cap);
796       fprintf (ira_dump_file, "\n");
797     }
798   return cap;
799 }
800 
801 /* Create and return an allocno live range for A covering [START,
802    FINISH] and chained before NEXT.  */
802 allocno_live_range_t
803 ira_create_allocno_live_range (ira_allocno_t a, int start, int finish,
804 			       allocno_live_range_t next)
805 {
806   allocno_live_range_t p;
807 
808   p = (allocno_live_range_t) pool_alloc (allocno_live_range_pool);
809   p->allocno = a;
810   p->start = start;
811   p->finish = finish;
812   p->next = next;
813   return p;
814 }
815 
816 /* Copy allocno live range R (a single node, keeping its next
817    pointer) and return the result.  */
817 static allocno_live_range_t
818 copy_allocno_live_range (allocno_live_range_t r)
819 {
820   allocno_live_range_t p;
821 
822   p = (allocno_live_range_t) pool_alloc (allocno_live_range_pool);
823   *p = *r;
824   return p;
825 }
826 
827 /* Copy allocno live range list given by its head R and return the
828    result (a newly allocated list in the same order).  */
829 allocno_live_range_t
830 ira_copy_allocno_live_range_list (allocno_live_range_t r)
831 {
832   allocno_live_range_t p, first, last;
833 
834   if (r == NULL)
835     return NULL;
836   for (first = last = NULL; r != NULL; r = r->next)
837     {
838       p = copy_allocno_live_range (r);
839       if (first == NULL)
840 	first = p;
841       else
842 	last->next = p;
843       last = p;
844     }
845   return first;
846 }
847 
848 /* Merge ranges R1 and R2 and returns the result.  The function
849    maintains the order of ranges and tries to minimize number of the
850    result ranges.  The merge is destructive: nodes of R1 and R2 are
851    reused and overlapping nodes are freed.  */
851 allocno_live_range_t
852 ira_merge_allocno_live_ranges (allocno_live_range_t r1,
853 			       allocno_live_range_t r2)
854 {
855   allocno_live_range_t first, last, temp;
856 
857   if (r1 == NULL)
858     return r2;
859   if (r2 == NULL)
860     return r1;
861   for (first = last = NULL; r1 != NULL && r2 != NULL;)
862     {
      /* Keep the invariant r1->start >= r2->start.  */
863       if (r1->start < r2->start)
864 	{
865 	  temp = r1;
866 	  r1 = r2;
867 	  r2 = temp;
868 	}
869       if (r1->start <= r2->finish + 1)
870 	{
871 	  /* r1 intersects or abuts r2: merge r2 into r1 and free
872 	     r2.  */
872 	  r1->start = r2->start;
873 	  if (r1->finish < r2->finish)
874 	    r1->finish = r2->finish;
875 	  temp = r2;
876 	  r2 = r2->next;
877 	  ira_finish_allocno_live_range (temp);
878 	  if (r2 == NULL)
879 	    {
880 	      /* To try to merge with subsequent ranges in r1.  */
881 	      r2 = r1->next;
882 	      r1->next = NULL;
883 	    }
884 	}
885       else
886 	{
887 	  /* Add r1 to the result.  */
888 	  if (first == NULL)
889 	    first = last = r1;
890 	  else
891 	    {
892 	      last->next = r1;
893 	      last = r1;
894 	    }
895 	  r1 = r1->next;
896 	  if (r1 == NULL)
897 	    {
898 	      /* To try to merge with subsequent ranges in r2.  */
899 	      r1 = r2->next;
900 	      r2->next = NULL;
901 	    }
902 	}
903     }
  /* Append whichever tail is left; it is already ordered.  */
904   if (r1 != NULL)
905     {
906       if (first == NULL)
907 	first = r1;
908       else
909 	last->next = r1;
910       ira_assert (r1->next == NULL);
911     }
912   else if (r2 != NULL)
913     {
914       if (first == NULL)
915 	first = r2;
916       else
917 	last->next = r2;
918       ira_assert (r2->next == NULL);
919     }
920   else
921     {
922       ira_assert (last->next == NULL);
923     }
924   return first;
925 }
926 
927 /* Return TRUE if live ranges R1 and R2 intersect.  */
928 bool
929 ira_allocno_live_ranges_intersect_p (allocno_live_range_t r1,
930 				     allocno_live_range_t r2)
931 {
932   /* Remember the live ranges are always kept ordered.  */
933   while (r1 != NULL && r2 != NULL)
934     {
935       if (r1->start > r2->finish)
936 	r1 = r1->next;
937       else if (r2->start > r1->finish)
938 	r2 = r2->next;
939       else
940 	return true;
941     }
942   return false;
943 }
944 
945 /* Free allocno live range R.  */
946 void
947 ira_finish_allocno_live_range (allocno_live_range_t r)
948 {
949   pool_free (allocno_live_range_pool, r);
950 }
951 
952 /* Free list of allocno live ranges starting with R.  */
953 void
954 ira_finish_allocno_live_range_list (allocno_live_range_t r)
955 {
956   allocno_live_range_t next_r;
957 
958   for (; r != NULL; r = next_r)
959     {
960       next_r = r->next;
961       ira_finish_allocno_live_range (r);
962     }
963 }
964 
965 /* Free updated register costs of allocno A.  */
966 void
967 ira_free_allocno_updated_costs (ira_allocno_t a)
968 {
969   enum reg_class cover_class;
970 
971   cover_class = ALLOCNO_COVER_CLASS (a);
972   if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
973     ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
974   ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
975   if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
976     ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
977 			  cover_class);
978   ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
979 }
980 
/* Free the memory allocated for allocno A: remove A from the global
   allocno and conflict-id maps, release its conflict array, all four
   cost vectors, and its live range list, and return A itself to the
   allocno pool.  */
static void
finish_allocno (ira_allocno_t a)
{
  enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);

  /* Clear the global references to A before freeing its storage.  */
  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  ira_conflict_id_allocno_map[ALLOCNO_CONFLICT_ID (a)] = NULL;
  /* Each of the following members is allocated lazily and so may be
     NULL.  */
  if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) != NULL)
    ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a));
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  cover_class);
  ira_finish_allocno_live_range_list (ALLOCNO_LIVE_RANGES (a));
  pool_free (allocno_pool, a);
}
1003 
/* Free the memory allocated for all allocnos, then the containers and
   pools that held them.  Individual allocnos must be released first
   because they reference the maps and pools destroyed below.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  VEC_free (ira_allocno_t, heap, ira_conflict_id_allocno_map_vec);
  VEC_free (ira_allocno_t, heap, allocno_vec);
  free_alloc_pool (allocno_pool);
  free_alloc_pool (allocno_live_range_pool);
}
1019 
1020 
1021 
/* Allocation pool from which struct ira_allocno_copy objects are
   taken.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies (ira_copies/ira_copies_num are
   refreshed from it on every push in ira_create_copy).  */
static VEC(ira_copy_t,heap) *copy_vec;
1028 
1029 /* The function initializes data concerning allocno copies.  */
1030 static void
1031 initiate_copies (void)
1032 {
1033   copy_pool
1034     = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
1035   copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
1036   ira_copies = NULL;
1037   ira_copies_num = 0;
1038 }
1039 
1040 /* Return copy connecting A1 and A2 and originated from INSN of
1041    LOOP_TREE_NODE if any.  */
1042 static ira_copy_t
1043 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
1044 		   ira_loop_tree_node_t loop_tree_node)
1045 {
1046   ira_copy_t cp, next_cp;
1047   ira_allocno_t another_a;
1048 
1049   for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1050     {
1051       if (cp->first == a1)
1052 	{
1053 	  next_cp = cp->next_first_allocno_copy;
1054 	  another_a = cp->second;
1055 	}
1056       else if (cp->second == a1)
1057 	{
1058 	  next_cp = cp->next_second_allocno_copy;
1059 	  another_a = cp->first;
1060 	}
1061       else
1062 	gcc_unreachable ();
1063       if (another_a == a2 && cp->insn == insn
1064 	  && cp->loop_tree_node == loop_tree_node)
1065 	return cp;
1066     }
1067   return NULL;
1068 }
1069 
1070 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1071    SECOND, FREQ, CONSTRAINT_P, and INSN.  */
1072 ira_copy_t
1073 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1074 		 bool constraint_p, rtx insn,
1075 		 ira_loop_tree_node_t loop_tree_node)
1076 {
1077   ira_copy_t cp;
1078 
1079   cp = (ira_copy_t) pool_alloc (copy_pool);
1080   cp->num = ira_copies_num;
1081   cp->first = first;
1082   cp->second = second;
1083   cp->freq = freq;
1084   cp->constraint_p = constraint_p;
1085   cp->insn = insn;
1086   cp->loop_tree_node = loop_tree_node;
1087   VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
1088   ira_copies = VEC_address (ira_copy_t, copy_vec);
1089   ira_copies_num = VEC_length (ira_copy_t, copy_vec);
1090   return cp;
1091 }
1092 
/* Attach a copy CP to allocnos involved into the copy.  Every allocno
   keeps all its copies on an intrusive doubly-linked list headed by
   ALLOCNO_COPIES; a copy is threaded through its
   next/prev_first_allocno_copy fields for the allocno that is its
   FIRST end and through next/prev_second_allocno_copy for its SECOND
   end.  CP is pushed at the head of both lists.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  /* CP becomes the new head of both lists, so it has no predecessor.  */
  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      /* Fix the back pointer of the old head.  Which prev field of
	 the old head refers to FIRST depends on which of its ends
	 FIRST is.  */
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      /* Likewise for the old head of SECOND's list.  */
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1120 
/* Detach a copy CP from allocnos involved into the copy, i.e. unlink
   it from the intrusive copy lists of both of its allocnos (see
   ira_add_allocno_copy_to_list for the list layout) and NULL out CP's
   own link fields.  */
void
ira_remove_allocno_copy_from_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;
  ira_copy_t prev, next;

  /* Unlink CP from FIRST's list.  */
  next = cp->next_first_allocno_copy;
  prev = cp->prev_first_allocno_copy;
  if (prev == NULL)
    /* CP was the list head.  */
    ALLOCNO_COPIES (first) = next;
  else if (prev->first == first)
    prev->next_first_allocno_copy = next;
  else
    prev->next_second_allocno_copy = next;
  if (next != NULL)
    {
      /* Which prev field of NEXT refers to FIRST depends on which of
	 NEXT's ends FIRST is.  */
      if (next->first == first)
	next->prev_first_allocno_copy = prev;
      else
	next->prev_second_allocno_copy = prev;
    }
  cp->prev_first_allocno_copy = cp->next_first_allocno_copy = NULL;

  /* Unlink CP from SECOND's list in the same way.  */
  next = cp->next_second_allocno_copy;
  prev = cp->prev_second_allocno_copy;
  if (prev == NULL)
    ALLOCNO_COPIES (second) = next;
  else if (prev->second == second)
    prev->next_second_allocno_copy = next;
  else
    prev->next_first_allocno_copy = next;
  if (next != NULL)
    {
      if (next->second == second)
	next->prev_second_allocno_copy = prev;
      else
	next->prev_first_allocno_copy = prev;
    }
  cp->prev_second_allocno_copy = cp->next_second_allocno_copy = NULL;
}
1162 
1163 /* Make a copy CP a canonical copy where number of the
1164    first allocno is less than the second one.  */
1165 void
1166 ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1167 {
1168   ira_allocno_t temp;
1169   ira_copy_t temp_cp;
1170 
1171   if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1172     return;
1173 
1174   temp = cp->first;
1175   cp->first = cp->second;
1176   cp->second = temp;
1177 
1178   temp_cp = cp->prev_first_allocno_copy;
1179   cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
1180   cp->prev_second_allocno_copy = temp_cp;
1181 
1182   temp_cp = cp->next_first_allocno_copy;
1183   cp->next_first_allocno_copy = cp->next_second_allocno_copy;
1184   cp->next_second_allocno_copy = temp_cp;
1185 }
1186 
1187 /* Create (or update frequency if the copy already exists) and return
1188    the copy of allocnos FIRST and SECOND with frequency FREQ
1189    corresponding to move insn INSN (if any) and originated from
1190    LOOP_TREE_NODE.  */
1191 ira_copy_t
1192 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1193 		      bool constraint_p, rtx insn,
1194 		      ira_loop_tree_node_t loop_tree_node)
1195 {
1196   ira_copy_t cp;
1197 
1198   if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1199     {
1200       cp->freq += freq;
1201       return cp;
1202     }
1203   cp = ira_create_copy (first, second, freq, constraint_p, insn,
1204 			loop_tree_node);
1205   ira_assert (first != NULL && second != NULL);
1206   ira_add_allocno_copy_to_list (cp);
1207   ira_swap_allocno_copy_ends_if_necessary (cp);
1208   return cp;
1209 }
1210 
1211 /* Print info about copy CP into file F.  */
1212 static void
1213 print_copy (FILE *f, ira_copy_t cp)
1214 {
1215   fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1216 	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1217 	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1218 	   cp->insn != NULL
1219 	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
1220 }
1221 
1222 /* Print info about copy CP into stderr.  */
1223 void
1224 ira_debug_copy (ira_copy_t cp)
1225 {
1226   print_copy (stderr, cp);
1227 }
1228 
1229 /* Print info about all copies into file F.  */
1230 static void
1231 print_copies (FILE *f)
1232 {
1233   ira_copy_t cp;
1234   ira_copy_iterator ci;
1235 
1236   FOR_EACH_COPY (cp, ci)
1237     print_copy (f, cp);
1238 }
1239 
/* Debug helper: dump the descriptions of all copies to stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
1246 
1247 /* Print info about copies involving allocno A into file F.  */
1248 static void
1249 print_allocno_copies (FILE *f, ira_allocno_t a)
1250 {
1251   ira_allocno_t another_a;
1252   ira_copy_t cp, next_cp;
1253 
1254   fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1255   for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1256     {
1257       if (cp->first == a)
1258 	{
1259 	  next_cp = cp->next_first_allocno_copy;
1260 	  another_a = cp->second;
1261 	}
1262       else if (cp->second == a)
1263 	{
1264 	  next_cp = cp->next_second_allocno_copy;
1265 	  another_a = cp->first;
1266 	}
1267       else
1268 	gcc_unreachable ();
1269       fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1270 	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1271     }
1272   fprintf (f, "\n");
1273 }
1274 
1275 /* Print info about copies involving allocno A into stderr.  */
1276 void
1277 ira_debug_allocno_copies (ira_allocno_t a)
1278 {
1279   print_allocno_copies (stderr, a);
1280 }
1281 
1282 /* The function frees memory allocated for copy CP.  */
1283 static void
1284 finish_copy (ira_copy_t cp)
1285 {
1286   pool_free (copy_pool, cp);
1287 }
1288 
1289 
1290 /* Free memory allocated for all copies.  */
1291 static void
1292 finish_copies (void)
1293 {
1294   ira_copy_t cp;
1295   ira_copy_iterator ci;
1296 
1297   FOR_EACH_COPY (cp, ci)
1298     finish_copy (cp);
1299   VEC_free (ira_copy_t, heap, copy_vec);
1300   free_alloc_pool (copy_pool);
1301 }
1302 
1303 
1304 
/* Pools for cost vectors, indexed by register class.  Only the
   entries for cover classes are initialized (see
   initiate_cost_vectors).  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
1307 
1308 /* The function initiates work with hard register cost vectors.  It
1309    creates allocation pool for each cover class.  */
1310 static void
1311 initiate_cost_vectors (void)
1312 {
1313   int i;
1314   enum reg_class cover_class;
1315 
1316   for (i = 0; i < ira_reg_class_cover_size; i++)
1317     {
1318       cover_class = ira_reg_class_cover[i];
1319       cost_vector_pool[cover_class]
1320 	= create_alloc_pool ("cost vectors",
1321 			     sizeof (int)
1322 			     * ira_class_hard_regs_num[cover_class],
1323 			     100);
1324     }
1325 }
1326 
1327 /* Allocate and return a cost vector VEC for COVER_CLASS.  */
1328 int *
1329 ira_allocate_cost_vector (enum reg_class cover_class)
1330 {
1331   return (int *) pool_alloc (cost_vector_pool[cover_class]);
1332 }
1333 
1334 /* Free a cost vector VEC for COVER_CLASS.  */
1335 void
1336 ira_free_cost_vector (int *vec, enum reg_class cover_class)
1337 {
1338   ira_assert (vec != NULL);
1339   pool_free (cost_vector_pool[cover_class], vec);
1340 }
1341 
1342 /* Finish work with hard register cost vectors.  Release allocation
1343    pool for each cover class.  */
1344 static void
1345 finish_cost_vectors (void)
1346 {
1347   int i;
1348   enum reg_class cover_class;
1349 
1350   for (i = 0; i < ira_reg_class_cover_size; i++)
1351     {
1352       cover_class = ira_reg_class_cover[i];
1353       free_alloc_pool (cost_vector_pool[cover_class]);
1354     }
1355 }
1356 
1357 
1358 
/* The loop tree node currently being visited by
   ira_traverse_loop_tree, and that node's regno -> allocno map.  Both
   are kept up to date across the recursion.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
1362 
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  /* Only loop nodes (never basic-block nodes) may be traversal
     roots.  */
  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    /* Visit the basic-block children of LOOP_NODE; both callbacks are
       applied to each of them here, before any subloop is entered.  */
    for (subloop_node = loop_node->children;
	 subloop_node != NULL;
	 subloop_node = subloop_node->next)
      if (subloop_node->bb != NULL)
	{
	  if (preorder_func != NULL)
	    (*preorder_func) (subloop_node);

	  if (postorder_func != NULL)
	    (*postorder_func) (subloop_node);
	}

  /* Recurse into each subloop.  */
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* The recursion above clobbered the current-node globals; restore
     them so the postorder callback sees LOOP_NODE as current.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1411 
1412 
1413 
/* The basic block currently being processed by create_bb_allocnos;
   read by create_insn_allocnos to compute allocno frequencies.  */
static basic_block curr_bb;
1416 
/* This recursive function creates allocnos corresponding to
   pseudo-registers containing in X.  True OUTPUT_P means that X is
   a lvalue.  For every pseudo found, the allocno's reference count
   and frequency are bumped, and written pseudos are recorded in the
   current loop tree node's modified_regnos bitmap.  */
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      /* Hard registers get no allocnos; only pseudos do.  */
      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  /* Reuse the allocno of the current region if one exists.  */
	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      /* The destination is written, the source only read.  */
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      /* An address is only read, even when the MEM is a store
	 destination.  */
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  /* Generic case: recurse into every rtx sub-expression of X,
     propagating OUTPUT_P unchanged.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
1479 
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  /* CURR_BB is read by create_insn_allocnos for frequency data.  */
  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  /* NOTE(review): insns are scanned in reverse order, apparently so
     the regno allocno lists come out in the required order (see
     create_allocnos) -- confirm before changing.  */
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another; create allocnos for pseudos live on entry to BB that no
     insn of BB references.  */
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
1502 
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo lives across E when it is live out of
   E->src and live into E->dest.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = DF_LR_IN (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map: the parent's allocno must be
	       created before the current node's.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	/* Record that this allocno crosses the loop border.  */
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
1533 
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  Basic-block nodes
   get per-insn allocnos; real loop nodes (other than the root) get
   allocnos for pseudos live across their entry and exit edges.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      VEC (edge, heap) *edges;

      /* Loop entries: all header predecessors except the back edge
	 from the latch.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      /* Loop exits.  */
      edges = get_loop_exit_edges (loop_node->loop);
      for (i = 0; VEC_iterate (edge, edges, i, e); i++)
	create_loop_allocnos (e);
      VEC_free (edge, heap, edges);
    }
}
1559 
1560 /* Propagate information about allocnos modified inside the loop given
1561    by its LOOP_TREE_NODE to its parent.  */
1562 static void
1563 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1564 {
1565   if (loop_tree_node == ira_loop_tree_root)
1566     return;
1567   ira_assert (loop_tree_node->bb == NULL);
1568   bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1569 		   loop_tree_node->modified_regnos);
1570 }
1571 
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class cover_class;

  /* Propagation is only meaningful for regional allocation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  /* For each pseudo, walk its allocno list and fold each allocno's
     data into the allocno of the parent region, when one exists.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* A spillable child makes the parent spillable too.  */
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  /* Additive statistics.  */
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
#endif
	  /* Conflicting hard regs accumulate by union.  */
	  IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
			    ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  cover_class = ALLOCNO_COVER_CLASS (a);
	  ira_assert (cover_class == ALLOCNO_COVER_CLASS (parent_a));
	  /* Cost vectors are allocated in the parent on demand and
	     summed element-wise.  */
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), cover_class,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     cover_class,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_COVER_CLASS_COST (parent_a)
	    += ALLOCNO_COVER_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
1629 
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  /* When optimizing, let outer loops know which pseudos the nested
     loops modify (postorder, so children run before parents).  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
1643 
1644 
1645 
1646 /* The page contains function to remove some regions from a separate
1647    register allocation.  We remove regions whose separate allocation
1648    will hardly improve the result.  As a result we speed up regional
1649    register allocation.  */
1650 
1651 /* The function changes allocno in range list given by R onto A.  */
1652 static void
1653 change_allocno_in_range_list (allocno_live_range_t r, ira_allocno_t a)
1654 {
1655   for (; r != NULL; r = r->next)
1656     r->allocno = a;
1657 }
1658 
1659 /* Return TRUE if NODE represents a loop with low register
1660    pressure.  */
1661 static bool
1662 low_pressure_loop_node_p (ira_loop_tree_node_t node)
1663 {
1664   int i;
1665   enum reg_class cover_class;
1666 
1667   if (node->bb != NULL)
1668     return false;
1669 
1670   for (i = 0; i < ira_reg_class_cover_size; i++)
1671     {
1672       cover_class = ira_reg_class_cover[i];
1673       if (node->reg_pressure[cover_class]
1674 	  > ira_available_class_regs[cover_class])
1675 	return false;
1676     }
1677   return true;
1678 }
1679 
1680 /* Sort loops for marking them for removal.  We put already marked
1681    loops first, then less frequent loops next, and then outer loops
1682    next.  */
1683 static int
1684 loop_compare_func (const void *v1p, const void *v2p)
1685 {
1686   int diff;
1687   ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
1688   ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
1689 
1690   ira_assert (l1->parent != NULL && l2->parent != NULL);
1691   if (l1->to_remove_p && ! l2->to_remove_p)
1692     return -1;
1693   if (! l1->to_remove_p && l2->to_remove_p)
1694     return 1;
1695   if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
1696     return diff;
1697   if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
1698     return diff;
1699   /* Make sorting stable.  */
1700   return l1->loop->num - l2->loop->num;
1701 }
1702 
1703 
1704 /* Mark loops which should be removed from regional allocation.  We
1705    remove a loop with low register pressure inside another loop with
1706    register pressure.  In this case a separate allocation of the loop
1707    hardly helps (for irregular register file architecture it could
1708    help by choosing a better hard register in the loop but we prefer
1709    faster allocation even in this case).  We also remove cheap loops
1710    if there are more than IRA_MAX_LOOPS_NUM of them.  */
1711 static void
1712 mark_loops_for_removal (void)
1713 {
1714   int i, n;
1715   ira_loop_tree_node_t *sorted_loops;
1716   loop_p loop;
1717 
1718   sorted_loops
1719     = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
1720 					     * VEC_length (loop_p,
1721 							   ira_loops.larray));
1722   for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
1723     if (ira_loop_nodes[i].regno_allocno_map != NULL)
1724       {
1725 	if (ira_loop_nodes[i].parent == NULL)
1726 	  {
1727 	    /* Don't remove the root.  */
1728 	    ira_loop_nodes[i].to_remove_p = false;
1729 	    continue;
1730 	  }
1731 	sorted_loops[n++] = &ira_loop_nodes[i];
1732 	ira_loop_nodes[i].to_remove_p
1733 	  = (low_pressure_loop_node_p (ira_loop_nodes[i].parent)
1734 	     && low_pressure_loop_node_p (&ira_loop_nodes[i]));
1735       }
1736   qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
1737   for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
1738     {
1739       sorted_loops[i]->to_remove_p = true;
1740       if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1741 	fprintf
1742 	  (ira_dump_file,
1743 	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
1744 	   sorted_loops[i]->loop->num, sorted_loops[i]->loop->header->index,
1745 	   sorted_loops[i]->loop->header->frequency,
1746 	   loop_depth (sorted_loops[i]->loop),
1747 	   low_pressure_loop_node_p (sorted_loops[i]->parent)
1748 	   && low_pressure_loop_node_p (sorted_loops[i])
1749 	   ? "low pressure" : "cheap loop");
1750     }
1751   ira_free (sorted_loops);
1752 }
1753 
/* Mark all loops but root for removing, so that only one region (the
   whole function) remains.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop->num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
1781 
/* Definition of vector of loop tree nodes.  */
DEF_VEC_P(ira_loop_tree_node_t);
DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);

/* Vec containing references to all removed loop tree nodes.  */
static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;

/* Vec containing references to all children of loop tree nodes; used
   as a work stack while rebuilding the tree in
   remove_uneccesary_loop_nodes_from_loop_tree.  */
static VEC(ira_loop_tree_node_t,heap) *children_vec;
1791 
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  Works recursively, using CHILDREN_VEC as a
   stack: surviving children are collected there and then re-linked
   under the nearest surviving ancestor.  Removed loop nodes are
   pushed onto REMOVED_LOOP_VEC.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  /* A surviving node becomes a child of its nearest surviving
     ancestor, which is currently collecting entries above START.  */
  if (! remove_p)
    VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
  start = VEC_length (ira_loop_tree_node_t, children_vec);
  /* Recurse into subloops; basic-block children always survive and
     are pushed directly.  */
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* NODE's surviving descendants stay on CHILDREN_VEC for the
	 caller (NODE's nearest surviving ancestor) to adopt.  */
      VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
      return;
    }
  /* Pop everything this node accumulated and rebuild its children and
     subloops lists.  */
  while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
    {
      subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
1829 
1830 /* Return TRUE if NODE is inside PARENT.  */
1831 static bool
1832 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
1833 {
1834   for (node = node->parent; node != NULL; node = node->parent)
1835     if (node == parent)
1836       return true;
1837   return false;
1838 }
1839 
1840 /* Sort allocnos according to their order in regno allocno list.  */
1841 static int
1842 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
1843 {
1844   ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
1845   ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
1846   ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
1847   ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
1848 
1849   if (loop_is_inside_p (n1, n2))
1850     return -1;
1851   else if (loop_is_inside_p (n2, n1))
1852     return 1;
1853   /* If allocnos are equally good, sort by allocno numbers, so that
1854      the results of qsort leave nothing to chance.  We put allocnos
1855      with higher number first in the list because it is the original
1856      order for allocnos from loops on the same levels.  */
1857   return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
1858 }
1859 
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  It is allocated lazily (sized for all
   allocnos) and freed in remove_unnecessary_allocnos.  */
static ira_allocno_t *regno_allocnos;
1863 
/* Restore allocno order for REGNO in the regno allocno list: sort
   the list so that allocnos from inner loops precede those from
   enclosing loops (see regno_allocno_order_compare_func).  Uses the
   scratch array REGNO_ALLOCNOS, which the caller must have
   allocated.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  /* Gather all allocnos of REGNO into the scratch array.  */
  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  /* Re-thread the sorted allocnos back into the singly-linked regno
     allocno list.  */
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
1885 
/* Propagate info from allocno FROM_A to allocno A: accumulate
   conflict hard register sets, reference/frequency counts, call
   info, and hard register costs.  Called when FROM_A's region is
   removed and A, in an enclosing region, takes over its role.  Both
   allocnos must have the same cover class.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class cover_class;

  IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
		    ALLOCNO_CONFLICT_HARD_REGS (from_a));
#ifdef STACK_REGS
  if (ALLOCNO_NO_STACK_REG_P (from_a))
    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
		    ALLOCNO_TOTAL_CONFLICT_HARD_REGS (from_a));
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* BAD_SPILL_P is sticky-false: once any merged allocno may be
     profitably spilled, so may the combination.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
#ifdef STACK_REGS
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from_a))
    ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
#endif
  cover_class = ALLOCNO_COVER_CLASS (from_a);
  ira_assert (cover_class == ALLOCNO_COVER_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     cover_class,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_COVER_CLASS_COST (a) += ALLOCNO_COVER_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
1922 
/* Remove allocnos from loops removed from the allocation
   consideration.  Each allocno in a removed region is either moved
   to the nearest surviving ancestor region (if no allocno for the
   same regno exists there) or merged into the ancestor's allocno and
   destroyed.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;
  allocno_live_range_t r;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Walk up until we find a region with an allocno for
		 REGNO or a region which survives removal.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  r = ALLOCNO_LIVE_RANGES (a);
		  change_allocno_in_range_list (r, parent_a);
		  ALLOCNO_LIVE_RANGES (parent_a)
		    = ira_merge_allocno_live_ranges
		      (r, ALLOCNO_LIVE_RANGES (parent_a));
		  merged_p = true;
		  ALLOCNO_LIVE_RANGES (a) = NULL;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2004 
/* Remove allocnos from all loops but the root.  First pass: merge
   every non-root, non-cap allocno into the root allocno of the same
   regno (creating it by promotion if absent).  Second pass: sanity
   checks.  Third pass: keep only the root allocnos and destroy the
   rest.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  allocno_live_range_t r;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  /* No root allocno for this regno yet -- promote A.  */
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Propagate only from the topmost allocno of each chain (the
	 one whose parent region has no allocno for REGNO) to avoid
	 accumulating the same info several times.  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      r = ALLOCNO_LIVE_RANGES (a);
      change_allocno_in_range_list (r, top_a);
      ALLOCNO_LIVE_RANGES (top_a)
	= ira_merge_allocno_live_ranges (r, ALLOCNO_LIVE_RANGES (top_a));
      merged_p = true;
      ALLOCNO_LIVE_RANGES (a) = NULL;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  /* Consistency checks on the remaining non-root allocnos.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  /* Keep only the root allocnos; destroy everything else.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  COPY_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
			     ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	finish_allocno (a);
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
2074 
2075 /* Remove loops from consideration.  We remove all loops except for
2076    root if ALL_P or loops for which a separate allocation will not
2077    improve the result.  We have to do this after allocno creation and
2078    their costs and cover class evaluation because only after that the
2079    register pressure can be known and is calculated.  */
2080 static void
2081 remove_unnecessary_regions (bool all_p)
2082 {
2083   if (all_p)
2084     mark_all_loops_for_removal ();
2085   else
2086     mark_loops_for_removal ();
2087   children_vec
2088     = VEC_alloc (ira_loop_tree_node_t, heap,
2089 		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2090   removed_loop_vec
2091     = VEC_alloc (ira_loop_tree_node_t, heap,
2092 		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2093   remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
2094   VEC_free (ira_loop_tree_node_t, heap, children_vec);
2095   if (all_p)
2096     remove_low_level_allocnos ();
2097   else
2098     remove_unnecessary_allocnos ();
2099   while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
2100     finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
2101   VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
2102 }
2103 
2104 
2105 
/* At this point true value of allocno attribute bad_spill_p means
   that there is an insn where allocno occurs and where the allocno
   can not be used as memory.  The function updates the attribute, now
   it can be true only for allocnos which can not be used as memory in
   an insn and in whose live ranges there are no other allocno deaths
   (the code below clears the attribute as soon as a death point is
   found inside one of the allocno's ranges).  Spilling allocnos with
   true value will not improve the code because it will not make other
   allocnos colorable and additional reloads for the corresponding
   pseudo will be generated in reload pass for each insn it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  allocno_live_range_t r;
  enum reg_class cover_class;
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      bitmap_initialize (&dead_points[cover_class], &reg_obstack);
    }
  /* Collect, per cover class, every program point where an allocno of
     that class dies (its live range finishes).  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      cover_class = ALLOCNO_COVER_CLASS (a);
      if (cover_class == NO_REGS)
	continue;
      for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
	bitmap_set_bit (&dead_points[cover_class], r->finish);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      cover_class = ALLOCNO_COVER_CLASS (a);
      if (cover_class == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      /* Look for a death point strictly inside one of A's ranges;
	 the inner break leaves I < R->finish, the outer break leaves
	 R != NULL.  */
      for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
	{
	  for (i = r->start + 1; i < r->finish; i++)
	    if (bitmap_bit_p (&dead_points[cover_class], i))
	      break;
	  if (i < r->finish)
	    break;
	}
      /* A death inside a range means spilling A could help other
	 allocnos, so it is not a (definitely) bad spill.  */
      if (r != NULL)
	ALLOCNO_BAD_SPILL_P (a) = false;
    }
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      bitmap_clear (&dead_points[cover_class]);
    }
}
2166 
2167 
2168 
2169 /* Set up minimal and maximal live range points for allocnos.  */
2170 static void
2171 setup_min_max_allocno_live_range_point (void)
2172 {
2173   int i;
2174   ira_allocno_t a, parent_a, cap;
2175   ira_allocno_iterator ai;
2176   allocno_live_range_t r;
2177   ira_loop_tree_node_t parent;
2178 
2179   FOR_EACH_ALLOCNO (a, ai)
2180     {
2181       r = ALLOCNO_LIVE_RANGES (a);
2182       if (r == NULL)
2183 	continue;
2184       ALLOCNO_MAX (a) = r->finish;
2185       for (; r->next != NULL; r = r->next)
2186 	;
2187       ALLOCNO_MIN (a) = r->start;
2188     }
2189   for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2190     for (a = ira_regno_allocno_map[i];
2191 	 a != NULL;
2192 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2193       {
2194 	if (ALLOCNO_MAX (a) < 0)
2195 	  continue;
2196 	ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2197 	/* Accumulation of range info.  */
2198 	if (ALLOCNO_CAP (a) != NULL)
2199 	  {
2200 	    for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2201 	      {
2202 		if (ALLOCNO_MAX (cap) < ALLOCNO_MAX (a))
2203 		  ALLOCNO_MAX (cap) = ALLOCNO_MAX (a);
2204 		if (ALLOCNO_MIN (cap) > ALLOCNO_MIN (a))
2205 		  ALLOCNO_MIN (cap) = ALLOCNO_MIN (a);
2206 	      }
2207 	    continue;
2208 	  }
2209 	if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2210 	  continue;
2211 	parent_a = parent->regno_allocno_map[i];
2212 	if (ALLOCNO_MAX (parent_a) < ALLOCNO_MAX (a))
2213 	  ALLOCNO_MAX (parent_a) = ALLOCNO_MAX (a);
2214 	if (ALLOCNO_MIN (parent_a) > ALLOCNO_MIN (a))
2215 	  ALLOCNO_MIN (parent_a) = ALLOCNO_MIN (a);
2216       }
2217 #ifdef ENABLE_IRA_CHECKING
2218   FOR_EACH_ALLOCNO (a, ai)
2219     {
2220       if ((0 <= ALLOCNO_MIN (a) && ALLOCNO_MIN (a) <= ira_max_point)
2221 	  && (0 <= ALLOCNO_MAX (a) && ALLOCNO_MAX (a) <= ira_max_point))
2222 	continue;
2223       gcc_unreachable ();
2224     }
2225 #endif
2226 }
2227 
2228 /* Sort allocnos according to their live ranges.  Allocnos with
2229    smaller cover class are put first unless we use priority coloring.
2230    Allocnos with the same cove class are ordered according their start
2231    (min).  Allocnos with the same start are ordered according their
2232    finish (max).  */
2233 static int
2234 allocno_range_compare_func (const void *v1p, const void *v2p)
2235 {
2236   int diff;
2237   ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2238   ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2239 
2240   if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2241       && (diff = ALLOCNO_COVER_CLASS (a1) - ALLOCNO_COVER_CLASS (a2)) != 0)
2242     return diff;
2243   if ((diff = ALLOCNO_MIN (a1) - ALLOCNO_MIN (a2)) != 0)
2244     return diff;
2245   if ((diff = ALLOCNO_MAX (a1) - ALLOCNO_MAX (a2)) != 0)
2246      return diff;
2247   return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2248 }
2249 
2250 /* Sort ira_conflict_id_allocno_map and set up conflict id of
2251    allocnos.  */
2252 static void
2253 sort_conflict_id_allocno_map (void)
2254 {
2255   int i, num;
2256   ira_allocno_t a;
2257   ira_allocno_iterator ai;
2258 
2259   num = 0;
2260   FOR_EACH_ALLOCNO (a, ai)
2261     ira_conflict_id_allocno_map[num++] = a;
2262   qsort (ira_conflict_id_allocno_map, num, sizeof (ira_allocno_t),
2263 	 allocno_range_compare_func);
2264   for (i = 0; i < num; i++)
2265     if ((a = ira_conflict_id_allocno_map[i]) != NULL)
2266       ALLOCNO_CONFLICT_ID (a) = i;
2267   for (i = num; i < ira_allocnos_num; i++)
2268     ira_conflict_id_allocno_map[i] = NULL;
2269 }
2270 
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  On entry ALLOCNO_MIN/ALLOCNO_MAX hold
   live range start/finish points (and the map is sorted by them, see
   sort_conflict_id_allocno_map); on exit the same fields hold
   conservative lower/upper bounds on the conflict ids of allocnos
   this one can intersect.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int cover_class;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  ira_allocno_t a;

  /* Forward sweep: compute the MIN bound.  An allocno cannot
     conflict with any allocno whose range finished before this one's
     range starts.  */
  live_range_min = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
  cover_class = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_allocnos_num; i++)
    {
      a = ira_conflict_id_allocno_map[i];
      if (a == NULL)
	continue;
      if (cover_class < 0
	  || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
	      && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
	{
	  /* New cover class group starts here.  */
	  cover_class = ALLOCNO_COVER_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = ALLOCNO_MIN (a);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     allocno_range_compare_func).  */
	  while (first_not_finished < i
		 && start > ALLOCNO_MAX (ira_conflict_id_allocno_map
					 [first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      /* Save the original range start before overwriting MIN with a
	 conflict id bound; the backward sweep still needs it.  */
      live_range_min[i] = ALLOCNO_MIN (a);
      ALLOCNO_MIN (a) = min;
    }
  /* Backward sweep: compute the MAX bound using LAST_LIVED, which
     maps a program point to the biggest index of an allocno (seen so
     far) whose range covers that point.  */
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  cover_class = -1;
  filled_area_start = -1;
  for (i = ira_allocnos_num - 1; i >= 0; i--)
    {
      a = ira_conflict_id_allocno_map[i];
      if (a == NULL)
	continue;
      if (cover_class < 0
	  || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
	      && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
	{
	  /* New cover class group: reset the point map.  */
	  cover_class = ALLOCNO_COVER_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = ALLOCNO_MAX (a);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = ALLOCNO_CONFLICT_ID (a) - 1;
      ALLOCNO_MAX (a) = max;
      /* In filling, we can go further A range finish to recognize
	 intersection quickly because if the finish of subsequently
	 processed allocno (it has smaller conflict id) range is
	 further A range finish than they are definitely intersected
	 (the reason for this is the allocnos with bigger conflict id
	 have their range starts not smaller than allocnos with
	 smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);
}
2356 
2357 
2358 
2359 static void
2360 create_caps (void)
2361 {
2362   ira_allocno_t a;
2363   ira_allocno_iterator ai;
2364   ira_loop_tree_node_t loop_tree_node;
2365 
2366   FOR_EACH_ALLOCNO (a, ai)
2367     {
2368       if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2369 	continue;
2370       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2371 	create_cap_allocno (a);
2372       else if (ALLOCNO_CAP (a) == NULL)
2373 	{
2374 	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2375 	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2376 	    create_cap_allocno (a);
2377 	}
2378     }
2379 }
2380 
2381 
2382 
2383 /* The page contains code transforming more one region internal
2384    representation (IR) to one region IR which is necessary for reload.
2385    This transformation is called IR flattening.  We might just rebuild
2386    the IR for one region but we don't do it because it takes a lot of
2387    time.  */
2388 
2389 /* Map: regno -> allocnos which will finally represent the regno for
2390    IR with one region.  */
2391 static ira_allocno_t *regno_top_level_allocno_map;
2392 
2393 /* Process all allocnos originated from pseudo REGNO and copy live
2394    ranges, hard reg conflicts, and allocno stack reg attributes from
2395    low level allocnos to final allocnos which are destinations of
2396    removed stores at a loop exit.  Return true if we copied live
2397    ranges.  */
2398 static bool
2399 copy_info_to_removed_store_destinations (int regno)
2400 {
2401   ira_allocno_t a;
2402   ira_allocno_t parent_a = NULL;
2403   ira_loop_tree_node_t parent;
2404   allocno_live_range_t r;
2405   bool merged_p;
2406 
2407   merged_p = false;
2408   for (a = ira_regno_allocno_map[regno];
2409        a != NULL;
2410        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2411     {
2412       if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))])
2413 	/* This allocno will be removed.  */
2414 	continue;
2415       /* Caps will be removed.  */
2416       ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2417       for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2418 	   parent != NULL;
2419 	   parent = parent->parent)
2420 	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
2421 	    || (parent_a == regno_top_level_allocno_map[REGNO (ALLOCNO_REG
2422 							       (parent_a))]
2423 		&& ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)))
2424 	  break;
2425       if (parent == NULL || parent_a == NULL)
2426 	continue;
2427       if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2428 	{
2429 	  fprintf
2430 	    (ira_dump_file,
2431 	     "      Coping ranges of a%dr%d to a%dr%d: ",
2432 	     ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)),
2433 	     ALLOCNO_NUM (parent_a), REGNO (ALLOCNO_REG (parent_a)));
2434 	  ira_print_live_range_list (ira_dump_file,
2435 				     ALLOCNO_LIVE_RANGES (a));
2436 	}
2437       r = ira_copy_allocno_live_range_list (ALLOCNO_LIVE_RANGES (a));
2438       change_allocno_in_range_list (r, parent_a);
2439       ALLOCNO_LIVE_RANGES (parent_a)
2440 	= ira_merge_allocno_live_ranges (r, ALLOCNO_LIVE_RANGES (parent_a));
2441       IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
2442 			ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
2443 #ifdef STACK_REGS
2444       if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2445 	ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
2446 #endif
2447       ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2448       ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2449 	+= ALLOCNO_CALLS_CROSSED_NUM (a);
2450       ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2451 	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2452       merged_p = true;
2453     }
2454   return merged_p;
2455 }
2456 
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j, num;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class cover_class;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t parent, node;
  allocno_live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;
  sparseset allocnos_live;

  /* Map: regno -> the allocno which will represent the regno after
     flattening.  Filled below; freed at the end.  */
  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they will never
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      COPY_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
			 ALLOCNO_CONFLICT_HARD_REGS (a));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (ALLOCNO_SOMEWHERE_RENAMED_P (a))
	    new_pseudos_p = true;
	  if (ALLOCNO_CAP (a) != NULL
	      || (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
	      || ((parent_a = parent->regno_allocno_map[ALLOCNO_REGNO (a)])
		  == NULL))
	    {
	      /* No parent allocno: A itself represents its pseudo at
		 the top level.  */
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (ALLOCNO_MEM_OPTIMIZED_DEST (a) != NULL)
	    mem_dest_p = true;
	  if (REGNO (ALLOCNO_REG (a)) == REGNO (ALLOCNO_REG (parent_a)))
	    {
	      /* A and its parent stay the same pseudo after emit:
		 merge A into the parent allocno.  */
	      IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
				ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
#ifdef STACK_REGS
	      if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
		ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
#endif
	      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file,
			   "      Moving ranges of a%dr%d to a%dr%d: ",
			   ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)),
			   ALLOCNO_NUM (parent_a),
			   REGNO (ALLOCNO_REG (parent_a)));
		  ira_print_live_range_list (ira_dump_file,
					     ALLOCNO_LIVE_RANGES (a));
		}
	      change_allocno_in_range_list (ALLOCNO_LIVE_RANGES (a), parent_a);
	      ALLOCNO_LIVE_RANGES (parent_a)
		= ira_merge_allocno_live_ranges
		  (ALLOCNO_LIVE_RANGES (a), ALLOCNO_LIVE_RANGES (parent_a));
	      merged_p = true;
	      ALLOCNO_LIVE_RANGES (a) = NULL;
	      ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
		= (ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
		   || ALLOCNO_MEM_OPTIMIZED_DEST_P (a));
	      continue;
	    }
	  /* A became a different pseudo: subtract its statistics and
	     costs from every ancestor allocno up the tree, since they
	     were accumulated there when regions still shared the
	     pseudo.  */
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      cover_class = ALLOCNO_COVER_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[cover_class];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_COVER_CLASS_COST (parent_a)
		-= ALLOCNO_COVER_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      if (ALLOCNO_CAP (parent_a) != NULL
		  || (parent
		      = ALLOCNO_LOOP_TREE_NODE (parent_a)->parent) == NULL
		  || (parent_a = (parent->regno_allocno_map
				  [ALLOCNO_REGNO (parent_a)])) == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
	    ira_assert (r->allocno == a);
	  clear_allocno_conflicts (a);
	}
      /* Sweep the program points, keeping the set of live surviving
	 allocnos, and record pairwise conflicts.  */
      allocnos_live = sparseset_alloc (ira_allocnos_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      a = r->allocno;
	      if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;
	      num = ALLOCNO_NUM (a);
	      cover_class = ALLOCNO_COVER_CLASS (a);
	      sparseset_set_bit (allocnos_live, num);
	      EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, n)
		{
		  ira_allocno_t live_a = ira_allocnos[n];

		  if (ira_reg_classes_intersect_p
		      [cover_class][ALLOCNO_COVER_CLASS (live_a)]
		      /* Don't set up conflict for the allocno with itself.  */
		      && num != (int) n)
		    ira_add_allocno_conflict (a, live_a);
		}
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
	}
      sparseset_free (allocnos_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first), REGNO (ALLOCNO_REG (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second), REGNO (ALLOCNO_REG (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->first))];
      second = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* A copy generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (ALLOCNO_REG (first))
		     == REGNO (ALLOCNO_REG (node_first)))
		     && (REGNO (ALLOCNO_REG (second))
			 == REGNO (ALLOCNO_REG (node_second))));
	}
      if (keep_p)
	{
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
		     REGNO (ALLOCNO_REG (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (ALLOCNO_REG (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      ira_add_allocno_copy_to_list (cp);
      ira_swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
2729 
2730 
2731 
2732 #ifdef ENABLE_IRA_CHECKING
2733 /* Check creation of all allocnos.  Allocnos on lower levels should
2734    have allocnos or caps on all upper levels.  */
2735 static void
2736 check_allocno_creation (void)
2737 {
2738   ira_allocno_t a;
2739   ira_allocno_iterator ai;
2740   ira_loop_tree_node_t loop_tree_node;
2741 
2742   FOR_EACH_ALLOCNO (a, ai)
2743     {
2744       loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2745       ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
2746 				ALLOCNO_NUM (a)));
2747       if (loop_tree_node == ira_loop_tree_root)
2748 	continue;
2749       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2750 	ira_assert (ALLOCNO_CAP (a) != NULL);
2751       else if (ALLOCNO_CAP (a) == NULL)
2752 	ira_assert (loop_tree_node->parent
2753 		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
2754 		    && bitmap_bit_p (loop_tree_node->border_allocnos,
2755 				     ALLOCNO_NUM (a)));
2756     }
2757 }
2758 #endif
2759 
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  If LOOPS_P is FALSE the nodes corresponding to
   the loops (except the root which corresponds to the whole function)
   and correspondingly allocnos for the loops will not be created.
   Such parameter value is used for Chaitin-Briggs coloring.  The
   function returns TRUE if we generate loop structure (besides nodes
   representing all function and the basic blocks) for regional
   allocation.  A true return means that we really need to flatten IR
   before the reload.  */
bool
ira_build (bool loops_p)
{
  /* Refresh the dataflow information the rest of the pass relies
     on.  */
  df_analyze ();

  /* Set up the basic IR objects: cost vectors, allocnos, copies, the
     loop tree (a single region when LOOPS_P is false), costs and
     live ranges.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_copies ();
  create_loop_tree_nodes (loops_p);
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  /* Re-derive LOOPS_P from the actual region structure: the region
     removal above may have left only the root region even when loops
     were requested.  */
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: push allocno info and caps up the loop
	 tree.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs_and_cover_classes ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  /* Finalize live-range points and conflict ids before building the
     conflict graph.  */
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_allocno_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but the root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  {
	    IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
			      call_used_reg_set);
	    IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
			      call_used_reg_set);
	  }
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      /* Dump summary statistics: N is the total number of conflicts
	 and NR the total number of live ranges over all allocnos.  */
      int n, nr;
      ira_allocno_t a;
      allocno_live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      FOR_EACH_ALLOCNO (a, ai)
	n += ALLOCNO_CONFLICT_ALLOCNOS_NUM (a);
      nr = 0;
      FOR_EACH_ALLOCNO (a, ai)
	for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
	  nr++;
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       VEC_length (loop_p, ira_loops.larray), n_basic_blocks,
	       ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d, copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, ira_copies_num, n, nr);
    }
  return loops_p;
}
2847 
/* Release all the data created by function ira_build: loop tree
   nodes, copies, allocnos, cost vectors and allocno live ranges.
   NOTE(review): the call order presumably matters (copies and
   allocnos reference each other and the live ranges) -- do not
   reorder without checking the finish_* routines.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}
2858