/* Building internal representation for IRA.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "params.h"
#include "sparseset.h"
#include "cfgloop.h"

static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
				     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;

/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}


/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root, which corresponds to the whole function) will not be
   allocated, but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
		     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  edges.release ();
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}

/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
	  && ira_loop_tree_root != &ira_loop_nodes[i])
	return true;
  return false;
}

/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}

/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
	ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
	ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}


/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add the
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}

/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns the maximal value of level in the tree plus 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
	max_height = height;
    }
  return max_height;
}
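
/* For example, for a tree consisting only of the root node,
   setup_loop_tree_level (root, 0) assigns level 0 and returns
   0 + 1 = 1, so the returned value is the number of levels in the
   tree.  */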

/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}



/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}


/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;

/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}

/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}

/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}

/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
    }
}

/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
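
/* For instance, a double-word pseudo (n == 2 above, with a mode
   exactly two words wide) gets one object per word so that conflicts
   can be tracked per subword; every other allocno gets a single
   object covering the whole register.  */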

/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}

/* Merge hard register conflict information for all objects associated
   with allocno FROM into the corresponding objects associated with
   allocno TO.  If TOTAL_ONLY is true, we only merge
   OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
			  OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
			OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}

/* Update hard register conflict information for all objects associated with
   A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}

/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such a case because it requires no
       allocation.  */
    return false;

  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
	  < 3 * nw * sizeof (IRA_INT_TYPE));
}
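
/* A worked example, assuming a 32-bit IRA_INT_TYPE and 8-byte
   pointers (both host-dependent): for min = 0 and max = 299,
   nw = (299 + 32) / 32 = 10, so the bit vector costs
   3 * 10 * 4 = 120 units while the conflict vector costs
   16 * (num + 1); the vector is therefore preferred only while
   num <= 6.  */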

/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}

/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}

/* Allocate and initialize the conflict vector or the conflict bit
   vector of OBJ for NUM conflicting allocnos, whichever is more
   profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}

/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);
      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
	{
	  ira_object_t *newvec;
	  size = (3 * num / 2 + 1) * sizeof (ira_object_t);
	  newvec = (ira_object_t *) ira_allocate (size);
	  memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
	  ira_free (vec);
	  vec = newvec;
	  OBJECT_CONFLICT_ARRAY (obj1) = vec;
	  OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	}
      vec[num - 2] = obj2;
      vec[num - 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
	{
	  /* Expand head of the bit vector.  */
	  added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
	  nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
	    {
	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		       vec, nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	    }
	  else
	    {
	      size
		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	      memset ((char *) vec
		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
	}
      else if (OBJECT_MAX (obj1) < id)
	{
	  nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = nw * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
	    {
	      /* Expand tail of the bit vector.  */
	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
		      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MAX (obj1) = id;
	}
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
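
/* A worked example of the head expansion above, assuming a 32-bit
   IRA_INT_TYPE: if OBJECT_MIN (obj1) is 64 and OBJ2 has conflict id
   10, then added_head_nw = (64 - 10 - 1) / 32 + 1 = 2 and the new
   minimum becomes 64 - 2 * 32 = 0, so bit 10 now falls inside the
   vector.  */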

/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}

/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}

/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark object presence in the conflict vector of
   the current object.  */
static int curr_conflict_check_tick;

/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
	{
	  conflict_check[id] = curr_conflict_check_tick;
	  vec[j++] = conflict_obj;
	}
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}

/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    {
      if (OBJECT_CONFLICT_VEC_P (obj))
	compress_conflict_vec (obj);
    }
  ira_free (conflict_check);
}

/* This recursive function outputs allocno A and, if it is a cap, its
   member.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}

/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}

/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
		       live_range_t next)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}

/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;
  p = ira_create_live_range (object, start, finish,
			     OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}
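
/* Note that ranges are always pushed at the head, so an object's live
   range list is kept ordered by decreasing start point; functions
   such as ira_live_ranges_intersect_p and ira_merge_live_ranges below
   rely on this ordering.  */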

/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  *p = *r;
  return p;
}

/* Copy allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
	first = p;
      else
	last->next = p;
      last = p;
    }
  return first;
}

/* Merge ranges R1 and R2 and return the result.  The function
   maintains the order of ranges and tries to minimize the number of
   result ranges.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
	std::swap (r1, r2);
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  live_range_t temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* Try to merge with the subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* Try to merge with the subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
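
/* For example (lists written head first, in decreasing start order):
   merging [10,12]->[1,5] with [4,8] first emits [10,12] unchanged and
   then glues [4,8] and [1,5] together into [1,8], yielding
   [10,12]->[1,8].  */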

/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
	r1 = r1->next;
      else if (r2->start > r1->finish)
	r2 = r2->next;
      else
	return true;
    }
  return false;
}
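
/* E.g. [10,12]->[1,5] and [6,9] do not intersect: 10 > 9 advances the
   first list to [1,5], and then 6 > 5 exhausts the second list.  */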

/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  live_range_pool.remove (r);
}

/* Free list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}

/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      object_pool.remove (obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  allocno_pool.remove (a);
}

/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  allocno_pool.release ();
  object_pool.release ();
  live_range_pool.release ();
}



/* Pools for allocno preferences.  */
static object_allocator <ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;

/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}

/* Return pref for A and HARD_REGNO if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}

/* Create and return pref with given attributes A, HARD_REGNO, and FREQ.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = pref_pool.allocate ();
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}

/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}

/* Create (or update frequency if the pref already exists) the pref of
   allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    return;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}
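
/* For example, calling ira_add_allocno_pref (a, hr, 100) twice leaves
   a single pref for HR on A's list, with accumulated frequency 200.  */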

/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, "  pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
	   ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
	   pref->hard_regno, pref->freq);
}

/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}

/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}

/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}

/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}

/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}

/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pref_pool.remove (pref);
}

/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
	     pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}

/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}

/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  pref_pool.release ();
}



/* Pools for copies.  */
static object_allocator<ira_allocno_copy> copy_pool ("copies");

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;

/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}

/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}

/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		 bool constraint_p, rtx_insn *insn,
		 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = copy_pool.allocate ();
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}

/* Attach a copy CP to the allocnos involved in the copy.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}

/* Make copy CP canonical: the number of the first allocno must be
   less than the number of the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  std::swap (cp->first, cp->second);
  std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
  std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
}

/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		      bool constraint_p, rtx_insn *insn,
		      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
			loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
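
/* As with prefs, registering the same copy again (same allocnos, insn
   and loop tree node) only accumulates its frequency; otherwise a
   fresh copy is created, linked onto both allocnos' copy lists and
   canonicalized.  */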

/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}

DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}

/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}

/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}

/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}

DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}

/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  copy_pool.remove (cp);
}


/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  copy_pool.release ();
}



/* Pools for cost vectors.  It is defined only for allocno classes.  */
static pool_allocator *cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates an allocation pool for each allocno class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass] = new pool_allocator
	("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
    }
}

/* Allocate and return a new cost vector for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) cost_vector_pool[(int) aclass]->allocate ();
}

/* Free a cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  cost_vector_pool[(int) aclass]->remove (vec);
}
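
/* A typical usage sketch:

     int *costs = ira_allocate_cost_vector (aclass);
     for (int i = 0; i < ira_class_hard_regs_num[aclass]; i++)
       costs[i] = 0;
     ...
     ira_free_cost_vector (costs, aclass);

   Each vector has one entry per hard register of ACLASS, in the order
   of ira_class_hard_regs[aclass].  */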

/* Finish work with hard register cost vectors.  Release the
   allocation pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      delete cost_vector_pool[aclass];
    }
}



/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   the LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks were visited in a different order, we would still compute a
   correct post-ordering, but it would be less likely that two nodes
   connected by an edge in the CFG are neighbors in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of a strange abuse of the BB_VISITED flag:  We
	 use the flag to mark blocks we still have to visit to add them
	 to our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}

/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;

1754 /* This recursive function traverses the loop tree with root LOOP_NODE,
1755    calling the non-null functions PREORDER_FUNC and POSTORDER_FUNC in
1756    preorder and postorder respectively.  The function sets up
1757    IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
1758    the basic block nodes of LOOP_NODE are also processed (before its
1759    subloop nodes).
1760 
1761    If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1762    the loop are passed in the *reverse* post-order of the *reverse*
1763    CFG.  This is only used by ira_create_allocno_live_ranges, which
1764    wants to visit basic blocks in this order to minimize the number
1765    of elements per live range chain.
1766    Note that the loop tree nodes are still visited in the normal,
1767    forward post-order of the loop tree.  */
1768 
1769 void
1770 ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
1771 			void (*preorder_func) (ira_loop_tree_node_t),
1772 			void (*postorder_func) (ira_loop_tree_node_t))
1773 {
1774   ira_loop_tree_node_t subloop_node;
1775 
1776   ira_assert (loop_node->bb == NULL);
1777   ira_curr_loop_tree_node = loop_node;
1778   ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1779 
1780   if (preorder_func != NULL)
1781     (*preorder_func) (loop_node);
1782 
1783   if (bb_p)
1784     {
1785       auto_vec<ira_loop_tree_node_t> loop_preorder;
1786       unsigned int i;
1787 
1788       /* Add all nodes to the set of nodes to visit.  The IRA loop tree
1789 	 is set up such that nodes in the loop body appear in a pre-order
1790 	 of their place in the CFG.  */
1791       for (subloop_node = loop_node->children;
1792 	   subloop_node != NULL;
1793 	   subloop_node = subloop_node->next)
1794 	if (subloop_node->bb != NULL)
1795 	  loop_preorder.safe_push (subloop_node);
1796 
1797       if (preorder_func != NULL)
1798 	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
1799 	  (*preorder_func) (subloop_node);
1800 
1801       if (postorder_func != NULL)
1802 	{
1803 	  vec<ira_loop_tree_node_t> loop_rev_postorder =
1804 	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
1805 	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
1806 	    (*postorder_func) (subloop_node);
1807 	  loop_rev_postorder.release ();
1808 	}
1809     }
1810 
1811   for (subloop_node = loop_node->subloops;
1812        subloop_node != NULL;
1813        subloop_node = subloop_node->subloop_next)
1814     {
1815       ira_assert (subloop_node->bb == NULL);
1816       ira_traverse_loop_tree (bb_p, subloop_node,
1817 			      preorder_func, postorder_func);
1818     }
1819 
1820   ira_curr_loop_tree_node = loop_node;
1821   ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1822 
1823   if (postorder_func != NULL)
1824     (*postorder_func) (loop_node);
1825 }
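
/* Usage sketch (DUMP_NODE_SKETCH is a hypothetical callback, not part
   of IRA): dump every node of the loop tree, basic blocks included,
   in post-order.  */
#if 0
static void
dump_node_sketch (ira_loop_tree_node_t node)
{
  if (node->bb != NULL)
    fprintf (stderr, "bb %d\n", node->bb->index);
  else
    fprintf (stderr, "loop %d\n", node->loop_num);
}

/* ira_traverse_loop_tree (true, ira_loop_tree_root,
                           NULL, dump_node_sketch);  */
#endif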
1826 
1827 
1828 
1829 /* The basic block currently being processed.  */
1830 static basic_block curr_bb;
1831 
1832 /* This recursive function creates allocnos corresponding to
1833    pseudo-registers contained in X.  True OUTPUT_P means that X is
1834    an lvalue.  OUTER is the parent expression of X, or NULL.  */
1835 static void
1836 create_insn_allocnos (rtx x, rtx outer, bool output_p)
1837 {
1838   int i, j;
1839   const char *fmt;
1840   enum rtx_code code = GET_CODE (x);
1841 
1842   if (code == REG)
1843     {
1844       int regno;
1845 
1846       if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1847 	{
1848 	  ira_allocno_t a;
1849 
1850 	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1851 	    {
1852 	      a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1853 	      if (outer != NULL && GET_CODE (outer) == SUBREG)
1854 		{
1855 		  machine_mode wmode = GET_MODE (outer);
1856 		  if (partial_subreg_p (ALLOCNO_WMODE (a), wmode))
1857 		    ALLOCNO_WMODE (a) = wmode;
1858 		}
1859 	    }
1860 
1861 	  ALLOCNO_NREFS (a)++;
1862 	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1863 	  if (output_p)
1864 	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1865 	}
1866       return;
1867     }
1868   else if (code == SET)
1869     {
1870       create_insn_allocnos (SET_DEST (x), NULL, true);
1871       create_insn_allocnos (SET_SRC (x), NULL, false);
1872       return;
1873     }
1874   else if (code == CLOBBER)
1875     {
1876       create_insn_allocnos (XEXP (x, 0), NULL, true);
1877       return;
1878     }
1879   else if (code == CLOBBER_HIGH)
1880     {
1881       gcc_assert (REG_P (XEXP (x, 0)) && HARD_REGISTER_P (XEXP (x, 0)));
1882       return;
1883     }
1884   else if (code == MEM)
1885     {
1886       create_insn_allocnos (XEXP (x, 0), NULL, false);
1887       return;
1888     }
1889   else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1890 	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1891     {
1892       create_insn_allocnos (XEXP (x, 0), NULL, true);
1893       create_insn_allocnos (XEXP (x, 0), NULL, false);
1894       return;
1895     }
1896 
1897   fmt = GET_RTX_FORMAT (code);
1898   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1899     {
1900       if (fmt[i] == 'e')
1901 	create_insn_allocnos (XEXP (x, i), x, output_p);
1902       else if (fmt[i] == 'E')
1903 	for (j = 0; j < XVECLEN (x, i); j++)
1904 	  create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
1905     }
1906 }
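
/* For example, given the insn pattern
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 1)))
   the walk above creates (or reuses) allocnos for pseudos 100 and 101
   in the current loop tree node, and marks 100 as modified there
   because it is reached through SET_DEST with OUTPUT_P true.  */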
1907 
1908 /* Create allocnos corresponding to pseudo-registers living in the
1909    basic block represented by the corresponding loop tree node
1910    BB_NODE.  */
1911 static void
1912 create_bb_allocnos (ira_loop_tree_node_t bb_node)
1913 {
1914   basic_block bb;
1915   rtx_insn *insn;
1916   unsigned int i;
1917   bitmap_iterator bi;
1918 
1919   curr_bb = bb = bb_node->bb;
1920   ira_assert (bb != NULL);
1921   FOR_BB_INSNS_REVERSE (bb, insn)
1922     if (NONDEBUG_INSN_P (insn))
1923       create_insn_allocnos (PATTERN (insn), NULL, false);
1924   /* There might be allocnos living through from one subloop to
1925      another.  */
1926   EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
1927     if (ira_curr_regno_allocno_map[i] == NULL)
1928       ira_create_allocno (i, false, ira_curr_loop_tree_node);
1929 }
1930 
1931 /* Create allocnos corresponding to pseudo-registers living on edge E
1932    (a loop entry or exit).  Also mark the allocnos as living on the
1933    loop border. */
1934 static void
1935 create_loop_allocnos (edge e)
1936 {
1937   unsigned int i;
1938   bitmap live_in_regs, border_allocnos;
1939   bitmap_iterator bi;
1940   ira_loop_tree_node_t parent;
1941 
1942   live_in_regs = df_get_live_in (e->dest);
1943   border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1944   EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
1945 			     FIRST_PSEUDO_REGISTER, i, bi)
1946     if (bitmap_bit_p (live_in_regs, i))
1947       {
1948 	if (ira_curr_regno_allocno_map[i] == NULL)
1949 	  {
1950 	    /* The order of creation is important for a correct
1951 	       ira_regno_allocno_map.  */
1952 	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
1953 		&& parent->regno_allocno_map[i] == NULL)
1954 	      ira_create_allocno (i, false, parent);
1955 	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
1956 	  }
1957 	bitmap_set_bit (border_allocnos,
1958 			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1959       }
1960 }
1961 
1962 /* Create allocnos corresponding to pseudo-registers living in the loop
1963    represented by the corresponding loop tree node LOOP_NODE.  This
1964    function is called by ira_traverse_loop_tree.  */
1965 static void
1966 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1967 {
1968   if (loop_node->bb != NULL)
1969     create_bb_allocnos (loop_node);
1970   else if (loop_node != ira_loop_tree_root)
1971     {
1972       int i;
1973       edge_iterator ei;
1974       edge e;
1975       vec<edge> edges;
1976 
1977       ira_assert (current_loops != NULL);
1978       FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1979 	if (e->src != loop_node->loop->latch)
1980 	  create_loop_allocnos (e);
1981 
1982       edges = get_loop_exit_edges (loop_node->loop);
1983       FOR_EACH_VEC_ELT (edges, i, e)
1984 	create_loop_allocnos (e);
1985       edges.release ();
1986     }
1987 }
1988 
1989 /* Propagate information about allocnos modified inside the loop given
1990    by its LOOP_TREE_NODE to its parent.  */
1991 static void
1992 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1993 {
1994   if (loop_tree_node == ira_loop_tree_root)
1995     return;
1996   ira_assert (loop_tree_node->bb == NULL);
1997   bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1998 		   loop_tree_node->modified_regnos);
1999 }
2000 
2001 /* Propagate new info about each allocno (see comments about
2002    accumulated info in the allocno definition) to the corresponding
2003    allocno on the upper loop tree level.  Thus allocnos on upper
2004    levels accumulate information about the corresponding allocnos in
2005    nested regions.  The new info means the allocno info finally
2006    calculated in this file.  */
2007 static void
2008 propagate_allocno_info (void)
2009 {
2010   int i;
2011   ira_allocno_t a, parent_a;
2012   ira_loop_tree_node_t parent;
2013   enum reg_class aclass;
2014 
2015   if (flag_ira_region != IRA_REGION_ALL
2016       && flag_ira_region != IRA_REGION_MIXED)
2017     return;
2018   for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2019     for (a = ira_regno_allocno_map[i];
2020 	 a != NULL;
2021 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2022       if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
2023 	  && (parent_a = parent->regno_allocno_map[i]) != NULL
2024 	  /* There are no caps yet at this point.  So use
2025 	     border_allocnos to find allocnos for the propagation.  */
2026 	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
2027 			   ALLOCNO_NUM (a)))
2028 	{
2029 	  if (! ALLOCNO_BAD_SPILL_P (a))
2030 	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
2031 	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
2032 	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
2033 	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2034 	  merge_hard_reg_conflicts (a, parent_a, true);
2035 	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2036 	    += ALLOCNO_CALLS_CROSSED_NUM (a);
2037 	  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2038 	    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2039  	  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
2040  			    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2041 	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2042 	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2043 	  aclass = ALLOCNO_CLASS (a);
2044 	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
2045 	  ira_allocate_and_accumulate_costs
2046 	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
2047 	     ALLOCNO_HARD_REG_COSTS (a));
2048 	  ira_allocate_and_accumulate_costs
2049 	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
2050 	     aclass,
2051 	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2052 	  ALLOCNO_CLASS_COST (parent_a)
2053 	    += ALLOCNO_CLASS_COST (a);
2054 	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
2055 	}
2056 }
2057 
2058 /* Create allocnos corresponding to pseudo-registers in the current
2059    function.  Traverse the loop tree for this.  */
2060 static void
2061 create_allocnos (void)
2062 {
2063   /* We need to process basic blocks first to correctly link allocnos
2064      by the member next_regno_allocno.  */
2065   ira_traverse_loop_tree (true, ira_loop_tree_root,
2066 			  create_loop_tree_node_allocnos, NULL);
2067   if (optimize)
2068     ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
2069 			    propagate_modified_regnos);
2070 }
2071 
2072 
2073 
2074 /* The page contains functions to remove some regions from separate
2075    register allocation.  We remove regions whose separate allocation
2076    will hardly improve the result.  As a result we speed up regional
2077    register allocation.  */
2078 
2079 /* The function changes the object in range list given by R to OBJ.  */
2080 static void
2081 change_object_in_range_list (live_range_t r, ira_object_t obj)
2082 {
2083   for (; r != NULL; r = r->next)
2084     r->object = obj;
2085 }
2086 
2087 /* Move all live ranges associated with allocno FROM to allocno TO.  */
2088 static void
2089 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2090 {
2091   int i;
2092   int n = ALLOCNO_NUM_OBJECTS (from);
2093 
2094   gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2095 
2096   for (i = 0; i < n; i++)
2097     {
2098       ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2099       ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2100       live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2101 
2102       if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2103 	{
2104 	  fprintf (ira_dump_file,
2105 		   "      Moving ranges of a%dr%d to a%dr%d: ",
2106 		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2107 		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2108 	  ira_print_live_range_list (ira_dump_file, lr);
2109 	}
2110       change_object_in_range_list (lr, to_obj);
2111       OBJECT_LIVE_RANGES (to_obj)
2112 	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2113       OBJECT_LIVE_RANGES (from_obj) = NULL;
2114     }
2115 }
2116 
2117 static void
2118 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2119 {
2120   int i;
2121   int n = ALLOCNO_NUM_OBJECTS (from);
2122 
2123   gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2124 
2125   for (i = 0; i < n; i++)
2126     {
2127       ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2128       ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2129       live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2130 
2131       if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2132 	{
2133 	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
2134 		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2135 		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2136 	  ira_print_live_range_list (ira_dump_file, lr);
2137 	}
2138       lr = ira_copy_live_range_list (lr);
2139       change_object_in_range_list (lr, to_obj);
2140       OBJECT_LIVE_RANGES (to_obj)
2141 	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2142     }
2143 }
2144 
2145 /* Return TRUE if NODE represents a loop with low register
2146    pressure.  */
2147 static bool
2148 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2149 {
2150   int i;
2151   enum reg_class pclass;
2152 
2153   if (node->bb != NULL)
2154     return false;
2155 
2156   for (i = 0; i < ira_pressure_classes_num; i++)
2157     {
2158       pclass = ira_pressure_classes[i];
2159       if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2160 	  && ira_class_hard_regs_num[pclass] > 1)
2161 	return false;
2162     }
2163   return true;
2164 }
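
/* For example, with a pressure class of 8 hard registers, register
   pressure 8 in NODE still counts as low, while pressure 9 does not.
   A pressure class with a single hard register never disqualifies
   NODE in this test.  */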
2165 
2166 #ifdef STACK_REGS
2167 /* Return TRUE if LOOP has a complex enter or exit edge.  We don't
2168    form a region from such a loop if the target uses stack registers,
2169    because reg-stack.c cannot deal with such edges.  */
2170 static bool
2171 loop_with_complex_edge_p (struct loop *loop)
2172 {
2173   int i;
2174   edge_iterator ei;
2175   edge e;
2176   vec<edge> edges;
2177   bool res;
2178 
2179   FOR_EACH_EDGE (e, ei, loop->header->preds)
2180     if (e->flags & EDGE_EH)
2181       return true;
2182   edges = get_loop_exit_edges (loop);
2183   res = false;
2184   FOR_EACH_VEC_ELT (edges, i, e)
2185     if (e->flags & EDGE_COMPLEX)
2186       {
2187 	res = true;
2188 	break;
2189       }
2190   edges.release ();
2191   return res;
2192 }
2193 #endif
2194 
2195 /* Sort loops for marking them for removal.  We put already marked
2196    loops first, then less frequent loops next, and finally outer
2197    loops.  */
2198 static int
2199 loop_compare_func (const void *v1p, const void *v2p)
2200 {
2201   int diff;
2202   ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2203   ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2204 
2205   ira_assert (l1->parent != NULL && l2->parent != NULL);
2206   if (l1->to_remove_p && ! l2->to_remove_p)
2207     return -1;
2208   if (! l1->to_remove_p && l2->to_remove_p)
2209     return 1;
2210   if ((diff = l1->loop->header->count.to_frequency (cfun)
2211 	      - l2->loop->header->count.to_frequency (cfun)) != 0)
2212     return diff;
2213   if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2214     return diff;
2215   /* Make sorting stable.  */
2216   return l1->loop_num - l2->loop_num;
2217 }
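
/* For example, among unmarked loops whose headers have frequencies 10
   and 100, the loop with frequency 10 sorts first and is therefore
   removed first when there are too many loops; frequency ties are
   broken by putting shallower (outer) loops first, and finally by
   loop number to make the sort stable.  */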
2218 
2219 /* Mark loops which should be removed from regional allocation.  We
2220    remove a loop with low register pressure inside another loop with
2221    low register pressure.  In this case a separate allocation of the
2222    loop hardly helps (for an irregular register file architecture it
2223    could help by choosing a better hard register in the loop, but we
2224    prefer faster allocation even in this case).  We also remove cheap
2225    loops if there are more than IRA_MAX_LOOPS_NUM of them.  Loops
2226    with EH enter or exit edges are removed too because the allocation
2227    might require putting pseudo moves on the EH edges (we could still
2228    do this for pseudos with caller-saved hard registers in some cases,
2229    but it is impossible to say here or during the top-down allocation
2230    pass what hard registers the pseudos finally get).  */
2231 static void
2232 mark_loops_for_removal (void)
2233 {
2234   int i, n;
2235   ira_loop_tree_node_t *sorted_loops;
2236   loop_p loop;
2237 
2238   ira_assert (current_loops != NULL);
2239   sorted_loops
2240     = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
2241 					     * number_of_loops (cfun));
2242   for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
2243     if (ira_loop_nodes[i].regno_allocno_map != NULL)
2244       {
2245 	if (ira_loop_nodes[i].parent == NULL)
2246 	  {
2247 	    /* Don't remove the root.  */
2248 	    ira_loop_nodes[i].to_remove_p = false;
2249 	    continue;
2250 	  }
2251 	sorted_loops[n++] = &ira_loop_nodes[i];
2252 	ira_loop_nodes[i].to_remove_p
2253 	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
2254 	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
2255 #ifdef STACK_REGS
2256 	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
2257 #endif
2258 	     );
2259       }
2260   qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
2261   for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
2262     {
2263       sorted_loops[i]->to_remove_p = true;
2264       if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2265 	fprintf
2266 	  (ira_dump_file,
2267 	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2268 	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
2269 	   sorted_loops[i]->loop->header->count.to_frequency (cfun),
2270 	   loop_depth (sorted_loops[i]->loop),
2271 	   low_pressure_loop_node_p (sorted_loops[i]->parent)
2272 	   && low_pressure_loop_node_p (sorted_loops[i])
2273 	   ? "low pressure" : "cheap loop");
2274     }
2275   ira_free (sorted_loops);
2276 }
2277 
2278 /* Mark all loops but the root for removal.  */
2279 static void
2280 mark_all_loops_for_removal (void)
2281 {
2282   int i;
2283   loop_p loop;
2284 
2285   ira_assert (current_loops != NULL);
2286   FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2287     if (ira_loop_nodes[i].regno_allocno_map != NULL)
2288       {
2289 	if (ira_loop_nodes[i].parent == NULL)
2290 	  {
2291 	    /* Don't remove the root.  */
2292 	    ira_loop_nodes[i].to_remove_p = false;
2293 	    continue;
2294 	  }
2295 	ira_loop_nodes[i].to_remove_p = true;
2296 	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2297 	  fprintf
2298 	    (ira_dump_file,
2299 	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2300 	     ira_loop_nodes[i].loop_num,
2301 	     ira_loop_nodes[i].loop->header->index,
2302 	     ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
2303 	     loop_depth (ira_loop_nodes[i].loop));
2304       }
2305 }
2306 
2307 /* Definition of vector of loop tree nodes.  */
2308 
2309 /* Vec containing references to all removed loop tree nodes.  */
2310 static vec<ira_loop_tree_node_t> removed_loop_vec;
2311 
2312 /* Vec containing references to all children of loop tree nodes.  */
2313 static vec<ira_loop_tree_node_t> children_vec;
2314 
2315 /* Remove subregions of NODE if their separate allocation will not
2316    improve the result.  */
2317 static void
2318 remove_unnecessary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
2319 {
2320   unsigned int start;
2321   bool remove_p;
2322   ira_loop_tree_node_t subnode;
2323 
2324   remove_p = node->to_remove_p;
2325   if (! remove_p)
2326     children_vec.safe_push (node);
2327   start = children_vec.length ();
2328   for (subnode = node->children; subnode != NULL; subnode = subnode->next)
2329     if (subnode->bb == NULL)
2330       remove_unnecessary_loop_nodes_from_loop_tree (subnode);
2331     else
2332       children_vec.safe_push (subnode);
2333   node->children = node->subloops = NULL;
2334   if (remove_p)
2335     {
2336       removed_loop_vec.safe_push (node);
2337       return;
2338     }
2339   while (children_vec.length () > start)
2340     {
2341       subnode = children_vec.pop ();
2342       subnode->parent = node;
2343       subnode->next = node->children;
2344       node->children = subnode;
2345       if (subnode->bb == NULL)
2346 	{
2347 	  subnode->subloop_next = node->subloops;
2348 	  node->subloops = subnode;
2349 	}
2350     }
2351 }
2352 
2353 /* Return TRUE if NODE is inside PARENT.  */
2354 static bool
2355 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2356 {
2357   for (node = node->parent; node != NULL; node = node->parent)
2358     if (node == parent)
2359       return true;
2360   return false;
2361 }
2362 
2363 /* Sort allocnos according to their order in regno allocno list.  */
2364 static int
2365 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2366 {
2367   ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2368   ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2369   ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2370   ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2371 
2372   if (loop_is_inside_p (n1, n2))
2373     return -1;
2374   else if (loop_is_inside_p (n2, n1))
2375     return 1;
2376   /* If allocnos are equally good, sort by allocno numbers, so that
2377      the results of qsort leave nothing to chance.  We put allocnos
2378      with higher numbers first in the list because that is the
2379      original order for allocnos from loops at the same level.  */
2380   return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2381 }
2382 
2383 /* This array is used to sort allocnos to restore allocno order in
2384    the regno allocno list.  */
2385 static ira_allocno_t *regno_allocnos;
2386 
2387 /* Restore allocno order for REGNO in the regno allocno list.  */
2388 static void
2389 ira_rebuild_regno_allocno_list (int regno)
2390 {
2391   int i, n;
2392   ira_allocno_t a;
2393 
2394   for (n = 0, a = ira_regno_allocno_map[regno];
2395        a != NULL;
2396        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2397     regno_allocnos[n++] = a;
2398   ira_assert (n > 0);
2399   qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2400 	 regno_allocno_order_compare_func);
2401   for (i = 1; i < n; i++)
2402     ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2403   ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2404   ira_regno_allocno_map[regno] = regno_allocnos[0];
2405   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2406     fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2407 }
2408 
2409 /* Propagate info from allocno FROM_A to allocno A.  */
2410 static void
2411 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2412 {
2413   enum reg_class aclass;
2414 
2415   merge_hard_reg_conflicts (from_a, a, false);
2416   ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2417   ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2418   ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2419   ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2420   ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2421     += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
2422   IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
2423  		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
2424 
2425   ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2426     += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
2427   if (! ALLOCNO_BAD_SPILL_P (from_a))
2428     ALLOCNO_BAD_SPILL_P (a) = false;
2429   aclass = ALLOCNO_CLASS (from_a);
2430   ira_assert (aclass == ALLOCNO_CLASS (a));
2431   ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2432 				     ALLOCNO_HARD_REG_COSTS (from_a));
2433   ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2434 				     aclass,
2435 				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2436   ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2437   ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2438 }
2439 
2440 /* Remove allocnos from loops removed from the allocation
2441    consideration.  */
2442 static void
2443 remove_unnecessary_allocnos (void)
2444 {
2445   int regno;
2446   bool merged_p, rebuild_p;
2447   ira_allocno_t a, prev_a, next_a, parent_a;
2448   ira_loop_tree_node_t a_node, parent;
2449 
2450   merged_p = false;
2451   regno_allocnos = NULL;
2452   for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
2453     {
2454       rebuild_p = false;
2455       for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2456 	   a != NULL;
2457 	   a = next_a)
2458 	{
2459 	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2460 	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
2461 	  if (! a_node->to_remove_p)
2462 	    prev_a = a;
2463 	  else
2464 	    {
2465 	      for (parent = a_node->parent;
2466 		   (parent_a = parent->regno_allocno_map[regno]) == NULL
2467 		     && parent->to_remove_p;
2468 		   parent = parent->parent)
2469 		;
2470 	      if (parent_a == NULL)
2471 		{
2472 		  /* There are no allocnos with the same regno in
2473 		     upper region -- just move the allocno to the
2474 		     upper region.  */
2475 		  prev_a = a;
2476 		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
2477 		  parent->regno_allocno_map[regno] = a;
2478 		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
2479 		  rebuild_p = true;
2480 		}
2481 	      else
2482 		{
2483 		  /* Remove the allocno and update info of allocno in
2484 		     the upper region.  */
2485 		  if (prev_a == NULL)
2486 		    ira_regno_allocno_map[regno] = next_a;
2487 		  else
2488 		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
2489 		  move_allocno_live_ranges (a, parent_a);
2490 		  merged_p = true;
2491 		  propagate_some_info_from_allocno (parent_a, a);
2492 		  /* Remove it from the corresponding regno allocno
2493 		     map to avoid propagating info from subsequent
2494 		     allocnos into this already removed allocno.  */
2495 		  a_node->regno_allocno_map[regno] = NULL;
2496 		  ira_remove_allocno_prefs (a);
2497 		  finish_allocno (a);
2498 		}
2499 	    }
2500 	}
2501       if (rebuild_p)
2502 	/* We need to restore the order in regno allocno list.  */
2503 	{
2504 	  if (regno_allocnos == NULL)
2505 	    regno_allocnos
2506 	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2507 						* ira_allocnos_num);
2508 	  ira_rebuild_regno_allocno_list (regno);
2509 	}
2510     }
2511   if (merged_p)
2512     ira_rebuild_start_finish_chains ();
2513   if (regno_allocnos != NULL)
2514     ira_free (regno_allocnos);
2515 }
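
/* For example, when loop L1 nested in loop L2 is removed and some
   ancestor already has an allocno for the same pseudo, the live
   ranges and accumulated info of the L1 allocno are merged into that
   ancestor allocno and the L1 allocno is freed; otherwise the L1
   allocno is simply reparented to the first ancestor that is kept.  */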
2516 
2517 /* Remove allocnos from all loops but the root.  */
2518 static void
2519 remove_low_level_allocnos (void)
2520 {
2521   int regno;
2522   bool merged_p, propagate_p;
2523   ira_allocno_t a, top_a;
2524   ira_loop_tree_node_t a_node, parent;
2525   ira_allocno_iterator ai;
2526 
2527   merged_p = false;
2528   FOR_EACH_ALLOCNO (a, ai)
2529     {
2530       a_node = ALLOCNO_LOOP_TREE_NODE (a);
2531       if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2532 	continue;
2533       regno = ALLOCNO_REGNO (a);
2534       if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2535 	{
2536 	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2537 	  ira_loop_tree_root->regno_allocno_map[regno] = a;
2538 	  continue;
2539 	}
2540       propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2541       /* Remove the allocno and update info of allocno in the upper
2542 	 region.  */
2543       move_allocno_live_ranges (a, top_a);
2544       merged_p = true;
2545       if (propagate_p)
2546 	propagate_some_info_from_allocno (top_a, a);
2547     }
2548   FOR_EACH_ALLOCNO (a, ai)
2549     {
2550       a_node = ALLOCNO_LOOP_TREE_NODE (a);
2551       if (a_node == ira_loop_tree_root)
2552 	continue;
2553       parent = a_node->parent;
2554       regno = ALLOCNO_REGNO (a);
2555       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2556 	ira_assert (ALLOCNO_CAP (a) != NULL);
2557       else if (ALLOCNO_CAP (a) == NULL)
2558  	ira_assert (parent->regno_allocno_map[regno] != NULL);
2559     }
2560   FOR_EACH_ALLOCNO (a, ai)
2561     {
2562       regno = ALLOCNO_REGNO (a);
2563       if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2564 	{
2565 	  ira_object_t obj;
2566 	  ira_allocno_object_iterator oi;
2567 
2568 	  ira_regno_allocno_map[regno] = a;
2569 	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2570 	  ALLOCNO_CAP_MEMBER (a) = NULL;
2571 	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2572 	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2573 			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2574 #ifdef STACK_REGS
2575 	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2576 	    ALLOCNO_NO_STACK_REG_P (a) = true;
2577 #endif
2578 	}
2579       else
2580 	{
2581 	  ira_remove_allocno_prefs (a);
2582 	  finish_allocno (a);
2583 	}
2584     }
2585   if (merged_p)
2586     ira_rebuild_start_finish_chains ();
2587 }
2588 
2589 /* Remove loops from consideration.  If ALL_P, we remove all loops
2590    except the root; otherwise we remove loops for which a separate
2591    allocation will not improve the result.  We have to do this after
2592    allocno creation and the evaluation of allocno costs and classes,
2593    because only then is the register pressure known.  */
2594 static void
2595 remove_unnecessary_regions (bool all_p)
2596 {
2597   if (current_loops == NULL)
2598     return;
2599   if (all_p)
2600     mark_all_loops_for_removal ();
2601   else
2602     mark_loops_for_removal ();
2603   children_vec.create (last_basic_block_for_fn (cfun)
2604 		       + number_of_loops (cfun));
2605   removed_loop_vec.create (last_basic_block_for_fn (cfun)
2606 			   + number_of_loops (cfun));
2607   remove_unnecessary_loop_nodes_from_loop_tree (ira_loop_tree_root);
2608   children_vec.release ();
2609   if (all_p)
2610     remove_low_level_allocnos ();
2611   else
2612     remove_unnecessary_allocnos ();
2613   while (removed_loop_vec.length () > 0)
2614     finish_loop_tree_node (removed_loop_vec.pop ());
2615   removed_loop_vec.release ();
2616 }
2617 
2618 
2619 
2620 /* At this point a true value of the allocno attribute bad_spill_p
2621    means that there is an insn where the allocno occurs and where the
2622    allocno cannot be used as memory.  The function refines the
2623    attribute: afterwards it can be true only for allocnos which
2624    cannot be used as memory in an insn and inside whose live ranges
2625    no other allocnos of the same class die.  Spilling allocnos with a
2626    true value will not improve the code because it will not make
2627    other allocnos colorable, and additional reloads for the
2628    corresponding pseudo will be generated in the reload pass for each
2629    insn where it occurs.
2630 
2631    This is a trick mentioned in the classic article by Chaitin et al.,
2632    frequently omitted in other implementations of graph-coloring RA.  */
2633 static void
2634 update_bad_spill_attribute (void)
2635 {
2636   int i;
2637   ira_allocno_t a;
2638   ira_allocno_iterator ai;
2639   ira_allocno_object_iterator aoi;
2640   ira_object_t obj;
2641   live_range_t r;
2642   enum reg_class aclass;
2643   bitmap_head dead_points[N_REG_CLASSES];
2644 
2645   for (i = 0; i < ira_allocno_classes_num; i++)
2646     {
2647       aclass = ira_allocno_classes[i];
2648       bitmap_initialize (&dead_points[aclass], &reg_obstack);
2649     }
2650   FOR_EACH_ALLOCNO (a, ai)
2651     {
2652       aclass = ALLOCNO_CLASS (a);
2653       if (aclass == NO_REGS)
2654 	continue;
2655       FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2656 	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2657 	  bitmap_set_bit (&dead_points[aclass], r->finish);
2658     }
2659   FOR_EACH_ALLOCNO (a, ai)
2660     {
2661       aclass = ALLOCNO_CLASS (a);
2662       if (aclass == NO_REGS)
2663 	continue;
2664       if (! ALLOCNO_BAD_SPILL_P (a))
2665 	continue;
2666       FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2667 	{
2668 	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2669 	    {
2670 	      for (i = r->start + 1; i < r->finish; i++)
2671 		if (bitmap_bit_p (&dead_points[aclass], i))
2672 		  break;
2673 	      if (i < r->finish)
2674 		break;
2675 	    }
2676 	  if (r != NULL)
2677 	    {
2678 	      ALLOCNO_BAD_SPILL_P (a) = false;
2679 	      break;
2680 	    }
2681 	}
2682     }
2683   for (i = 0; i < ira_allocno_classes_num; i++)
2684     {
2685       aclass = ira_allocno_classes[i];
2686       bitmap_clear (&dead_points[aclass]);
2687     }
2688 }
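
/* For example, an allocno that must stay in a register in some insn
   keeps bad_spill_p set when no other allocno of its class dies
   strictly inside its live ranges: spilling it would hardly free a
   register at a point where another allocno needs one, so it would
   only add reloads.  */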
2689 
2690 
2691 
2692 /* Set up minimal and maximal live range points for allocnos.  */
2693 static void
2694 setup_min_max_allocno_live_range_point (void)
2695 {
2696   int i;
2697   ira_allocno_t a, parent_a, cap;
2698   ira_allocno_iterator ai;
2699 #ifdef ENABLE_IRA_CHECKING
2700   ira_object_iterator oi;
2701   ira_object_t obj;
2702 #endif
2703   live_range_t r;
2704   ira_loop_tree_node_t parent;
2705 
2706   FOR_EACH_ALLOCNO (a, ai)
2707     {
2708       int n = ALLOCNO_NUM_OBJECTS (a);
2709 
2710       for (i = 0; i < n; i++)
2711 	{
2712 	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
2713 	  r = OBJECT_LIVE_RANGES (obj);
2714 	  if (r == NULL)
2715 	    continue;
2716 	  OBJECT_MAX (obj) = r->finish;
2717 	  for (; r->next != NULL; r = r->next)
2718 	    ;
2719 	  OBJECT_MIN (obj) = r->start;
2720 	}
2721     }
2722   for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2723     for (a = ira_regno_allocno_map[i];
2724 	 a != NULL;
2725 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2726       {
2727 	int j;
2728 	int n = ALLOCNO_NUM_OBJECTS (a);
2729 
2730 	for (j = 0; j < n; j++)
2731 	  {
2732 	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
2733 	    ira_object_t parent_obj;
2734 
2735 	    if (OBJECT_MAX (obj) < 0)
2736 	      {
2737 		/* The object is not used and hence does not live.  */
2738 		ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
2739 		OBJECT_MAX (obj) = 0;
2740 		OBJECT_MIN (obj) = 1;
2741 		continue;
2742 	      }
2743 	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2744 	    /* Accumulation of range info.  */
2745 	    if (ALLOCNO_CAP (a) != NULL)
2746 	      {
2747 		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2748 		  {
2749 		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2750 		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2751 		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2752 		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2753 		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2754 		  }
2755 		continue;
2756 	      }
2757 	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2758 	      continue;
2759 	    parent_a = parent->regno_allocno_map[i];
2760 	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
2761 	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2762 	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2763 	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2764 	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2765 	  }
2766       }
2767 #ifdef ENABLE_IRA_CHECKING
2768   FOR_EACH_OBJECT (obj, oi)
2769     {
2770       if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
2771 	  && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
2772 	continue;
2773       gcc_unreachable ();
2774     }
2775 #endif
2776 }
2777 
2778 /* Sort objects according to their live ranges.  Objects are ordered
2779    by their start point (min).  Objects with the same start are
2780    ordered by their finish point (max).  Remaining ties are broken by
2781    allocno number, so that the results of qsort leave nothing to
2782    chance.  */
2783 static int
2784 object_range_compare_func (const void *v1p, const void *v2p)
2785 {
2786   int diff;
2787   ira_object_t obj1 = *(const ira_object_t *) v1p;
2788   ira_object_t obj2 = *(const ira_object_t *) v2p;
2789   ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2790   ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2791 
2792   if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2793     return diff;
2794   if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2795      return diff;
2796   return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2797 }
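
/* For example, objects with (min, max) ranges (1, 5), (2, 3) and
   (1, 7) end up ordered (1, 5), (1, 7), (2, 3): smaller start first,
   and among equal starts the smaller finish first.  */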
2798 
2799 /* Sort ira_object_id_map and set up conflict id of allocnos.  */
2800 static void
2801 sort_conflict_id_map (void)
2802 {
2803   int i, num;
2804   ira_allocno_t a;
2805   ira_allocno_iterator ai;
2806 
2807   num = 0;
2808   FOR_EACH_ALLOCNO (a, ai)
2809     {
2810       ira_allocno_object_iterator oi;
2811       ira_object_t obj;
2812 
2813       FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2814 	ira_object_id_map[num++] = obj;
2815     }
2816   if (num > 1)
2817     qsort (ira_object_id_map, num, sizeof (ira_object_t),
2818 	   object_range_compare_func);
2819   for (i = 0; i < num; i++)
2820     {
2821       ira_object_t obj = ira_object_id_map[i];
2822 
2823       gcc_assert (obj != NULL);
2824       OBJECT_CONFLICT_ID (obj) = i;
2825     }
2826   for (i = num; i < ira_objects_num; i++)
2827     ira_object_id_map[i] = NULL;
2828 }
2829 
2830 /* Set up minimal and maximal conflict ids of allocnos with which a
2831    given allocno can conflict.  */
2832 static void
2833 setup_min_max_conflict_allocno_ids (void)
2834 {
2835   int aclass;
2836   int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2837   int *live_range_min, *last_lived;
2838   int word0_min, word0_max;
2839   ira_allocno_t a;
2840   ira_allocno_iterator ai;
2841 
2842   live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2843   aclass = -1;
2844   first_not_finished = -1;
2845   for (i = 0; i < ira_objects_num; i++)
2846     {
2847       ira_object_t obj = ira_object_id_map[i];
2848 
2849       if (obj == NULL)
2850 	continue;
2851 
2852       a = OBJECT_ALLOCNO (obj);
2853 
2854       if (aclass < 0)
2855 	{
2856 	  aclass = ALLOCNO_CLASS (a);
2857 	  min = i;
2858 	  first_not_finished = i;
2859 	}
2860       else
2861 	{
2862 	  start = OBJECT_MIN (obj);
2863 	  /* If we skip an allocno, the allocnos with smaller ids will
2864 	     also be skipped because of the secondary sort on the
2865 	     range finishes (see function
2866 	     object_range_compare_func).  */
2867 	  while (first_not_finished < i
2868 		 && start > OBJECT_MAX (ira_object_id_map
2869 					[first_not_finished]))
2870 	    first_not_finished++;
2871 	  min = first_not_finished;
2872 	}
2873       if (min == i)
2874 	/* We could increase min further in this case but it is good
2875 	   enough.  */
2876 	min++;
2877       live_range_min[i] = OBJECT_MIN (obj);
2878       OBJECT_MIN (obj) = min;
2879     }
2880   last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2881   aclass = -1;
2882   filled_area_start = -1;
2883   for (i = ira_objects_num - 1; i >= 0; i--)
2884     {
2885       ira_object_t obj = ira_object_id_map[i];
2886 
2887       if (obj == NULL)
2888 	continue;
2889 
2890       a = OBJECT_ALLOCNO (obj);
2891       if (aclass < 0)
2892 	{
2893 	  aclass = ALLOCNO_CLASS (a);
2894 	  for (j = 0; j < ira_max_point; j++)
2895 	    last_lived[j] = -1;
2896 	  filled_area_start = ira_max_point;
2897 	}
2898       min = live_range_min[i];
2899       finish = OBJECT_MAX (obj);
2900       max = last_lived[finish];
2901       if (max < 0)
2902 	/* We could decrease max further in this case but it is good
2903 	   enough.  */
2904 	max = OBJECT_CONFLICT_ID (obj) - 1;
2905       OBJECT_MAX (obj) = max;
2906       /* When filling, we can go beyond the range finish of A to
2907 	 recognize intersections quickly: if the range of a subsequently
2908 	 processed allocno (which has a smaller conflict id) finishes
2909 	 beyond the range finish of A, then the ranges definitely
2910 	 intersect (the reason is that allocnos with bigger conflict ids
2911 	 have range starts not smaller than allocnos with smaller
2912 	 ids).  */
2913       for (j = min; j < filled_area_start; j++)
2914 	last_lived[j] = i;
2915       filled_area_start = min;
2916     }
2917   ira_free (last_lived);
2918   ira_free (live_range_min);
2919 
2920   /* For allocnos with more than one object, we may later record extra conflicts in
2921      subobject 0 that we cannot really know about here.
2922      For now, simply widen the min/max range of these subobjects.  */
2923 
2924   word0_min = INT_MAX;
2925   word0_max = INT_MIN;
2926 
2927   FOR_EACH_ALLOCNO (a, ai)
2928     {
2929       int n = ALLOCNO_NUM_OBJECTS (a);
2930       ira_object_t obj0;
2931 
2932       if (n < 2)
2933 	continue;
2934       obj0 = ALLOCNO_OBJECT (a, 0);
2935       if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2936 	word0_min = OBJECT_CONFLICT_ID (obj0);
2937       if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2938 	word0_max = OBJECT_CONFLICT_ID (obj0);
2939     }
2940   FOR_EACH_ALLOCNO (a, ai)
2941     {
2942       int n = ALLOCNO_NUM_OBJECTS (a);
2943       ira_object_t obj0;
2944 
2945       if (n < 2)
2946 	continue;
2947       obj0 = ALLOCNO_OBJECT (a, 0);
2948       if (OBJECT_MIN (obj0) > word0_min)
2949 	OBJECT_MIN (obj0) = word0_min;
2950       if (OBJECT_MAX (obj0) < word0_max)
2951 	OBJECT_MAX (obj0) = word0_max;
2952     }
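
/* Sketch of the intended use of the ids computed above (illustrative
   only; the real consumers live in ira-conflicts.c and additionally
   maintain conflict bit vectors): two objects whose conflict ids lie
   outside each other's [min, max] windows can never conflict, so a
   pairwise check can reject them cheaply.  */
#if 0
static bool
may_conflict_sketch (ira_object_t obj1, ira_object_t obj2)
{
  if (OBJECT_CONFLICT_ID (obj2) < OBJECT_MIN (obj1)
      || OBJECT_CONFLICT_ID (obj2) > OBJECT_MAX (obj1)
      || OBJECT_CONFLICT_ID (obj1) < OBJECT_MIN (obj2)
      || OBJECT_CONFLICT_ID (obj1) > OBJECT_MAX (obj2))
    return false;
  return true;
}
#endif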
2953 }
2954 
2955 
2956 
2957 static void
2958 create_caps (void)
2959 {
2960   ira_allocno_t a;
2961   ira_allocno_iterator ai;
2962   ira_loop_tree_node_t loop_tree_node;
2963 
2964   FOR_EACH_ALLOCNO (a, ai)
2965     {
2966       if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2967 	continue;
2968       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2969 	create_cap_allocno (a);
2970       else if (ALLOCNO_CAP (a) == NULL)
2971 	{
2972 	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2973 	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2974 	    create_cap_allocno (a);
2975 	}
2976     }
2977 }
2978 
2979 
2980 
2981 /* The page contains code transforming a multi-region internal
2982    representation (IR) into a one-region IR, which is necessary for
2983    reload.  This transformation is called IR flattening.  We might
2984    just rebuild the IR for one region, but we don't do it because it
2985    takes a lot of time.  */
2986 
2987 /* Map: regno -> allocno which will finally represent the regno in
2988    the one-region IR.  */
2989 static ira_allocno_t *regno_top_level_allocno_map;
2990 
2991 /* Find the allocno that corresponds to A at a level one higher up in the
2992    loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
2993 ira_allocno_t
2994 ira_parent_allocno (ira_allocno_t a)
2995 {
2996   ira_loop_tree_node_t parent;
2997 
2998   if (ALLOCNO_CAP (a) != NULL)
2999     return NULL;
3000 
3001   parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3002   if (parent == NULL)
3003     return NULL;
3004 
3005   return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3006 }
3007 
3008 /* Find the allocno that corresponds to A at a level one higher up in the
3009    loop tree.  If ALLOCNO_CAP is set for A, return that.  */
3010 ira_allocno_t
3011 ira_parent_or_cap_allocno (ira_allocno_t a)
3012 {
3013   if (ALLOCNO_CAP (a) != NULL)
3014     return ALLOCNO_CAP (a);
3015 
3016   return ira_parent_allocno (a);
3017 }
3018 
3019 /* Process all allocnos originating from pseudo REGNO and copy live
3020    ranges, hard reg conflicts, and allocno stack reg attributes from
3021    low level allocnos to final allocnos which are destinations of
3022    removed stores at a loop exit.  Return true if we copied live
3023    ranges.  */
3024 static bool
3025 copy_info_to_removed_store_destinations (int regno)
3026 {
3027   ira_allocno_t a;
3028   ira_allocno_t parent_a = NULL;
3029   ira_loop_tree_node_t parent;
3030   bool merged_p;
3031 
3032   merged_p = false;
3033   for (a = ira_regno_allocno_map[regno];
3034        a != NULL;
3035        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3036     {
3037       if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
3038 	/* This allocno will be removed.  */
3039 	continue;
3040 
3041       /* Caps will be removed.  */
3042       ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3043       for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3044 	   parent != NULL;
3045 	   parent = parent->parent)
3046 	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
3047 	    || (parent_a
3048 		== regno_top_level_allocno_map[REGNO
3049 					       (allocno_emit_reg (parent_a))]
3050 		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
3051 	  break;
3052       if (parent == NULL || parent_a == NULL)
3053 	continue;
3054 
3055       copy_allocno_live_ranges (a, parent_a);
3056       merge_hard_reg_conflicts (a, parent_a, true);
3057 
3058       ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
3059       ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3060 	+= ALLOCNO_CALLS_CROSSED_NUM (a);
3061       ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3062 	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3063       IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
3064  			ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
3065       ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3066 	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3067       merged_p = true;
3068     }
3069   return merged_p;
3070 }
3071 
3072 /* Flatten the IR.  In other words, this function transforms IR as if
3073    it were built with one region (without loops).  We could make it
3074    much simpler by rebuilding IR with one region, but unfortunately it
3075    takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
3076    IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3077    IRA_MAX_POINT before emitting insns on the loop borders.  */
3078 void
3079 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
3080 {
3081   int i, j;
3082   bool keep_p;
3083   int hard_regs_num;
3084   bool new_pseudos_p, merged_p, mem_dest_p;
3085   unsigned int n;
3086   enum reg_class aclass;
3087   ira_allocno_t a, parent_a, first, second, node_first, node_second;
3088   ira_copy_t cp;
3089   ira_loop_tree_node_t node;
3090   live_range_t r;
3091   ira_allocno_iterator ai;
3092   ira_copy_iterator ci;
3093 
3094   regno_top_level_allocno_map
3095     = (ira_allocno_t *) ira_allocate (max_reg_num ()
3096 				      * sizeof (ira_allocno_t));
3097   memset (regno_top_level_allocno_map, 0,
3098 	  max_reg_num () * sizeof (ira_allocno_t));
3099   new_pseudos_p = merged_p = false;
3100   FOR_EACH_ALLOCNO (a, ai)
3101     {
3102       ira_allocno_object_iterator oi;
3103       ira_object_t obj;
3104 
3105       if (ALLOCNO_CAP_MEMBER (a) != NULL)
3106 	/* Caps are not in the regno allocno maps and they will never
3107 	   be transformed into allocnos existing after IR
3108 	   flattening.  */
3109 	continue;
3110       FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3111 	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
3112 			   OBJECT_CONFLICT_HARD_REGS (obj));
3113 #ifdef STACK_REGS
3114       ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
3115 #endif
3116     }
3117   /* Fix final allocno attributes.  */
3118   for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
3119     {
3120       mem_dest_p = false;
3121       for (a = ira_regno_allocno_map[i];
3122 	   a != NULL;
3123 	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3124 	{
3125 	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
3126 
3127 	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3128 	  if (data->somewhere_renamed_p)
3129 	    new_pseudos_p = true;
3130 	  parent_a = ira_parent_allocno (a);
3131 	  if (parent_a == NULL)
3132 	    {
3133 	      ALLOCNO_COPIES (a) = NULL;
3134 	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
3135 	      continue;
3136 	    }
3137 	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
3138 
3139 	  if (data->mem_optimized_dest != NULL)
3140 	    mem_dest_p = true;
3141 	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
3142 	  if (REGNO (data->reg) == REGNO (parent_data->reg))
3143 	    {
3144 	      merge_hard_reg_conflicts (a, parent_a, true);
3145 	      move_allocno_live_ranges (a, parent_a);
3146 	      merged_p = true;
3147 	      parent_data->mem_optimized_dest_p
3148 		= (parent_data->mem_optimized_dest_p
3149 		   || data->mem_optimized_dest_p);
3150 	      continue;
3151 	    }
3152 	  new_pseudos_p = true;
3153 	  for (;;)
3154 	    {
3155 	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
3156 	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
3157 	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
3158 	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3159 		-= ALLOCNO_CALLS_CROSSED_NUM (a);
3160 	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3161 		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3162 	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3163 		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3164 	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
3165 			  && ALLOCNO_NREFS (parent_a) >= 0
3166 			  && ALLOCNO_FREQ (parent_a) >= 0);
3167 	      aclass = ALLOCNO_CLASS (parent_a);
3168 	      hard_regs_num = ira_class_hard_regs_num[aclass];
3169 	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
3170 		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
3171 		for (j = 0; j < hard_regs_num; j++)
3172 		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
3173 		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
3174 	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
3175 		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
3176 		for (j = 0; j < hard_regs_num; j++)
3177 		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
3178 		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
3179 	      ALLOCNO_CLASS_COST (parent_a)
3180 		-= ALLOCNO_CLASS_COST (a);
3181 	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
3182 	      parent_a = ira_parent_allocno (parent_a);
3183 	      if (parent_a == NULL)
3184 		break;
3185 	    }
3186 	  ALLOCNO_COPIES (a) = NULL;
3187 	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
3188 	}
3189       if (mem_dest_p && copy_info_to_removed_store_destinations (i))
3190 	merged_p = true;
3191     }
3192   ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
3193   if (merged_p || ira_max_point_before_emit != ira_max_point)
3194     ira_rebuild_start_finish_chains ();
3195   if (new_pseudos_p)
3196     {
3197       sparseset objects_live;
3198 
3199       /* Rebuild conflicts.  */
3200       FOR_EACH_ALLOCNO (a, ai)
3201 	{
3202 	  ira_allocno_object_iterator oi;
3203 	  ira_object_t obj;
3204 
3205 	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3206 	      || ALLOCNO_CAP_MEMBER (a) != NULL)
3207 	    continue;
3208 	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3209 	    {
3210 	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3211 		ira_assert (r->object == obj);
3212 	      clear_conflicts (obj);
3213 	    }
3214 	}
3215       objects_live = sparseset_alloc (ira_objects_num);
3216       for (i = 0; i < ira_max_point; i++)
3217 	{
3218 	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3219 	    {
3220 	      ira_object_t obj = r->object;
3221 
3222 	      a = OBJECT_ALLOCNO (obj);
3223 	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3224 		  || ALLOCNO_CAP_MEMBER (a) != NULL)
3225 		continue;
3226 
3227 	      aclass = ALLOCNO_CLASS (a);
3228 	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3229 		{
3230 		  ira_object_t live_obj = ira_object_id_map[n];
3231 		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3232 		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3233 
3234 		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
3235 		      /* Don't set up conflict for the allocno with itself.  */
3236 		      && live_a != a)
3237 		    ira_add_conflict (obj, live_obj);
3238 		}
3239 	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
3240 	    }
3241 
3242 	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3243 	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3244 	}
3245       sparseset_free (objects_live);
3246       compress_conflict_vecs ();
3247     }
3248   /* Mark some copies for removal and update the allocnos in the
3249      remaining copies.  */
3250   FOR_EACH_COPY (cp, ci)
3251     {
3252       if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3253 	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3254 	{
3255 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3256 	    fprintf
3257 	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
3258 	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3259 	       ALLOCNO_NUM (cp->first),
3260 	       REGNO (allocno_emit_reg (cp->first)),
3261 	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3262 	       ALLOCNO_NUM (cp->second),
3263 	       REGNO (allocno_emit_reg (cp->second)));
3264 	  cp->loop_tree_node = NULL;
3265 	  continue;
3266 	}
3267       first
3268 	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3269       second
3270 	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3271       node = cp->loop_tree_node;
3272       if (node == NULL)
3273 	keep_p = true; /* The copy was generated in ira-emit.c.  */
3274       else
3275 	{
3276 	  /* Check that the copy was not propagated from a level on
3277 	     which we will have different pseudos.  */
3278 	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3279 	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3280 	  keep_p = ((REGNO (allocno_emit_reg (first))
3281 		     == REGNO (allocno_emit_reg (node_first)))
3282 		     && (REGNO (allocno_emit_reg (second))
3283 			 == REGNO (allocno_emit_reg (node_second))));
3284 	}
3285       if (keep_p)
3286 	{
3287 	  cp->loop_tree_node = ira_loop_tree_root;
3288 	  cp->first = first;
3289 	  cp->second = second;
3290 	}
3291       else
3292 	{
3293 	  cp->loop_tree_node = NULL;
3294 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3295 	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
3296 		     cp->num, ALLOCNO_NUM (cp->first),
3297 		     REGNO (allocno_emit_reg (cp->first)),
3298 		     ALLOCNO_NUM (cp->second),
3299 		     REGNO (allocno_emit_reg (cp->second)));
3300 	}
3301     }
3302   /* Remove unnecessary allocnos on lower levels of the loop tree.  */
3303   FOR_EACH_ALLOCNO (a, ai)
3304     {
3305       if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3306 	  || ALLOCNO_CAP_MEMBER (a) != NULL)
3307 	{
3308 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3309 	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
3310 		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3311 	  ira_remove_allocno_prefs (a);
3312 	  finish_allocno (a);
3313 	  continue;
3314 	}
3315       ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3316       ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3317       ALLOCNO_CAP (a) = NULL;
3318       /* Restore updated costs for assignments from reload.  */
3319       ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3320       ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3321       if (! ALLOCNO_ASSIGNED_P (a))
3322 	ira_free_allocno_updated_costs (a);
3323       ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3324       ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3325     }
3326   /* Remove unnecessary copies.  */
3327   FOR_EACH_COPY (cp, ci)
3328     {
3329       if (cp->loop_tree_node == NULL)
3330 	{
3331 	  ira_copies[cp->num] = NULL;
3332 	  finish_copy (cp);
3333 	  continue;
3334 	}
3335       ira_assert
3336 	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3337 	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3338       add_allocno_copy_to_list (cp);
3339       swap_allocno_copy_ends_if_necessary (cp);
3340     }
3341   rebuild_regno_allocno_maps ();
3342   if (ira_max_point != ira_max_point_before_emit)
3343     ira_compress_allocno_live_ranges ();
3344   ira_free (regno_top_level_allocno_map);
3345 }
3346 
3347 
3348 
3349 #ifdef ENABLE_IRA_CHECKING
3350 /* Check the creation of all allocnos.  Each allocno on a lower level
3351    should have a corresponding allocno or cap on all upper levels.  */
3352 static void
3353 check_allocno_creation (void)
3354 {
3355   ira_allocno_t a;
3356   ira_allocno_iterator ai;
3357   ira_loop_tree_node_t loop_tree_node;
3358 
3359   FOR_EACH_ALLOCNO (a, ai)
3360     {
3361       loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3362       ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3363 				ALLOCNO_NUM (a)));
3364       if (loop_tree_node == ira_loop_tree_root)
3365 	continue;
3366       if (ALLOCNO_CAP_MEMBER (a) != NULL)
3367 	ira_assert (ALLOCNO_CAP (a) != NULL);
3368       else if (ALLOCNO_CAP (a) == NULL)
3369 	ira_assert (loop_tree_node->parent
3370 		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3371 		    && bitmap_bit_p (loop_tree_node->border_allocnos,
3372 				     ALLOCNO_NUM (a)));
3373     }
3374 }
3375 #endif
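
/* For instance, given a made-up loop tree (the allocno and pseudo
   numbers below are hypothetical):

	root:     a2r12		<- allocno (or cap) required here
	  loop_1:   a5r12	<- border allocno of loop_1

   a5 may exist only if r12 is also represented in the root node,
   either by a parent allocno such as a2 or by a cap; the function
   above asserts exactly that relationship for every non-root
   allocno.  */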
3376 
3377 /* Identify allocnos which prefer a register class with a single hard register.
3378    Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3379    less likely to use the preferred singleton register.  */
3380 static void
3381 update_conflict_hard_reg_costs (void)
3382 {
3383   ira_allocno_t a;
3384   ira_allocno_iterator ai;
3385   int i, index, min;
3386 
3387   FOR_EACH_ALLOCNO (a, ai)
3388     {
3389       reg_class_t aclass = ALLOCNO_CLASS (a);
3390       reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3391       int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3392       if (singleton < 0)
3393 	continue;
3394       index = ira_class_hard_reg_index[(int) aclass][singleton];
3395       if (index < 0)
3396 	continue;
3397       if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3398 	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3399 	continue;
3400       min = INT_MAX;
3401       for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3402 	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3403 	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3404 	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
3405       if (min == INT_MAX)
3406 	continue;
3407       ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3408 				  aclass, 0);
3409       ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3410 	-= min - ALLOCNO_CLASS_COST (a);
3411     }
3412 }
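
/* A worked example of the adjustment above, with made-up numbers and a
   hypothetical target whose preferred class contains the single hard
   register %eax: suppose ALLOCNO_CLASS_COST (a) is 2000 and the
   cheapest ALLOCNO_HARD_REG_COSTS entry exceeding it is min = 6000.
   Then

	ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= 6000 - 2000;

   i.e. the entry at %eax's index drops by 4000.  Because the coloring
   code in ira-color.c subtracts an allocno's conflict costs from the
   costs of conflicting allocnos, the smaller (possibly negative) entry
   makes %eax look about 4000 units more expensive to them, leaving the
   singleton register free for the preferring allocno.  */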
3413 
3414 /* Create an internal representation (IR) for IRA (allocnos, copies,
3415    loop tree nodes).  The function returns TRUE if we generate a loop
3416    structure (besides the nodes representing the whole function and the
3417    basic blocks) for regional allocation.  A true return means that we
3418    really need to flatten the IR before reload.  */
3419 bool
3420 ira_build (void)
3421 {
3422   bool loops_p;
3423 
3424   df_analyze ();
3425   initiate_cost_vectors ();
3426   initiate_allocnos ();
3427   initiate_prefs ();
3428   initiate_copies ();
3429   create_loop_tree_nodes ();
3430   form_loop_tree ();
3431   create_allocnos ();
3432   ira_costs ();
3433   create_allocno_objects ();
3434   ira_create_allocno_live_ranges ();
3435   remove_unnecessary_regions (false);
3436   ira_compress_allocno_live_ranges ();
3437   update_bad_spill_attribute ();
3438   loops_p = more_one_region_p ();
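  /* With more than one region, allocno info must be propagated up the
     loop tree, and caps must be created to represent subloop allocnos
     in their parent nodes.  */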
3439   if (loops_p)
3440     {
3441       propagate_allocno_info ();
3442       create_caps ();
3443     }
3444   ira_tune_allocno_costs ();
3445 #ifdef ENABLE_IRA_CHECKING
3446   check_allocno_creation ();
3447 #endif
3448   setup_min_max_allocno_live_range_point ();
3449   sort_conflict_id_map ();
3450   setup_min_max_conflict_allocno_ids ();
3451   ira_build_conflicts ();
3452   update_conflict_hard_reg_costs ();
3453   if (! ira_conflicts_p)
3454     {
3455       ira_allocno_t a;
3456       ira_allocno_iterator ai;
3457 
3458       /* Remove all regions but the root one.  */
3459       if (loops_p)
3460 	{
3461 	  remove_unnecessary_regions (true);
3462 	  loops_p = false;
3463 	}
3464       /* We don't save hard registers around calls for fast allocation
3465 	 -- add caller-clobbered registers as conflicting ones to
3466 	 allocnos crossing calls.  */
3467       FOR_EACH_ALLOCNO (a, ai)
3468 	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3469 	  ior_hard_reg_conflicts (a, &call_used_reg_set);
3470     }
3471   if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3472     print_copies (ira_dump_file);
3473   if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3474     print_prefs (ira_dump_file);
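  /* A hypothetical fragment of the statistics printed below (all
     numbers are made up):

	  regions=3, blocks=17, points=208
	    allocnos=45 (big 2), copies=7, conflicts=120, ranges=183  */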
3475   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3476     {
3477       int n, nr, nr_big;
3478       ira_allocno_t a;
3479       live_range_t r;
3480       ira_allocno_iterator ai;
3481 
3482       n = 0;
3483       nr = 0;
3484       nr_big = 0;
3485       FOR_EACH_ALLOCNO (a, ai)
3486 	{
3487 	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3488 
3489 	  if (nobj > 1)
3490 	    nr_big++;
3491 	  for (j = 0; j < nobj; j++)
3492 	    {
3493 	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
3494 	      n += OBJECT_NUM_CONFLICTS (obj);
3495 	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3496 		nr++;
3497 	    }
3498 	}
3499       fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
3500 	       current_loops == NULL ? 1 : number_of_loops (cfun),
3501 	       n_basic_blocks_for_fn (cfun), ira_max_point);
3502       fprintf (ira_dump_file,
3503 	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3504 	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3505     }
3506   return loops_p;
3507 }
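
/* A condensed sketch of how a driver is expected to pair ira_build
   with ira_destroy.  This is a simplification, not a verbatim excerpt:
   the real sequence in ira.c also runs move emission, IR flattening
   and reload in between.

     bool loops_p = ira_build ();
     if (ira_conflicts_p)
       ira_color ();              -- regional coloring
     else
       ira_fast_allocation ();    -- quick one-pass allocation
     ...
     if (loops_p)
       -- regional structure was built, so flatten the IR for reload
       ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
     ...
     ira_destroy ();  */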
3508 
3509 /* Release the data created by the function ira_build.  */
3510 void
3511 ira_destroy (void)
3512 {
3513   finish_loop_tree_nodes ();
3514   finish_prefs ();
3515   finish_copies ();
3516   finish_allocnos ();
3517   finish_cost_vectors ();
3518   ira_finish_allocno_live_ranges ();
3519 }
3520