/* CFG cleanup for trees.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "toplev.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "ggc.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (VEC(edge,gc) *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

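      /* Folding the condition below may trigger strict-overflow style
         warnings; defer them here and only emit them later if the
         folded value actually lets us remove an edge.  */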
      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            tree lhs = gimple_cond_lhs (stmt);
            tree rhs = gimple_cond_rhs (stmt);
            /* For conditions try harder and look up single-argument
               PHI nodes.  Only do so from the same basic block though,
               as other basic blocks may be dead already.  */
            if (TREE_CODE (lhs) == SSA_NAME
                && !name_registered_for_update_p (lhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (lhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  lhs = PHI_ARG_DEF (def_stmt, 0);
              }
            if (TREE_CODE (rhs) == SSA_NAME
                && !name_registered_for_update_p (rhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  rhs = PHI_ARG_DEF (def_stmt, 0);
              }
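            /* With single-argument PHIs looked through, the condition
               may now fold to a constant from which find_taken_edge
               can determine the edge that is always executed.  */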
            val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                   boolean_type_node, lhs, rhs);
            break;
          }

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
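      /* The probabilities of the removed edges were accumulated onto
         TAKEN_EDGE above; rounding may push the sum past
         REG_BR_PROB_BASE, so clamp it.  */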
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
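  /* TAKEN_EDGE is now BB's only successor, so the control statement
     is redundant; remove it and let the block fall through.  */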
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to
   ENTRY_BLOCK_PTR.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

#ifdef ENABLE_CHECKING
  gcc_assert (bb != ENTRY_BLOCK_PTR);
#endif

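  /* Record the location on the outgoing edge; at -O0 we refuse to
     remove the block if doing so would lose a distinct goto location
     the user could set a breakpoint on.  */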
  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

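      /* A forwarder that feeds a loop header typically acts as the
         loop's preheader or latch; removing it would force the loop
         structures to be rebuilt, so keep it.  */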
      if (dest->loop_father->header == dest)
        return false;
    }
  return true;
}

/* Return true if BB has at least one abnormal incoming edge.  */

static inline bool
has_abnormal_incoming_edge_p (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_ABNORMAL)
      return true;

  return false;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during removal of the PHI nodes at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge into DEST, the problems would occur
     anyway, since cleanup_dead_labels would then merge the labels for
     two different EH regions, and the rest of the exception handling
     code does not like that.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (has_abnormal_incoming_edge_p (bb)
      && (has_abnormal_incoming_edge_p (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

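  /* Debug statements can only be moved into DEST when BB is its sole
     predecessor; otherwise the moved bindings would wrongly take
     effect on DEST's other incoming paths as well.  */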
  can_move_debug_stmts = single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

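      /* If the redirection reused E itself (S == E) instead of
         merging it into a pre-existing edge to DEST, the PHI nodes in
         DEST have no argument for this edge yet; copy the argument
         that was associated with the forwarder's outgoing edge
         SUCC.  */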
      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as
     user-defined labels and labels with an EH landing pad number to
     the new block, so that the redirection of the abnormal edges
     works, jump targets end up in a sane place and debug information
     for labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has just a single
     predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
        bb = gimple_bb (stmt);
        /* BB might have been deleted at this point, so first verify
           that BB is still present in the CFG.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= last_basic_block
            || BASIC_BLOCK (bb->index) != bb
            || last_stmt (bb) == stmt
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed = true;
        split_block (bb, stmt);
        remove_fallthru_edge (bb->succs);
      }

  return changed;
}

/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it.  */

static bool
cleanup_omp_return (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  basic_block control_bb;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_OMP_RETURN
      || !single_pred_p (bb))
    return false;

  control_bb = single_pred (bb);
  stmt = last_stmt (control_bb);

  if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
    return false;

  /* The block with the control statement normally has two entry edges -- one
     from entry, one from continue.  If continue is removed, return is
     unreachable, so we remove it here as well.  */
  if (EDGE_COUNT (control_bb->preds) == 2)
    return false;

  gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
  remove_edge_and_dominated_blocks (single_pred_edge (bb));
  return true;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if
   anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = false;

  if (cleanup_omp_return (bb))
    return true;

  retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the CFG cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge-to-CASE_LABEL_EXPR mappings.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
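      /* The entry and exit blocks can end up in the worklist when
         edges adjacent to them change, but they never need any
         cleanup themselves, so skip them.  */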
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any noreturn
         calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

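  /* Removing edges and blocks may have invalidated the loop
     structures; record that they need fixing up before anything
     relies on them again.  */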
  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs = BITMAP_ALLOC (NULL);
  fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the CFG that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable by back edges from the latch).  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  loops_state_clear (LOOPS_NEED_FIXUP);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Merge the PHI nodes at BB into those at BB's sole successor.  */

static void
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             BB's sole successor edge have the same PHI arguments
             at DEST, redirect E to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              for (i = 0; VEC_iterate (edge_var_map, head, i, vm); ++i)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
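  /* Candidate blocks are collected into a worklist first and only
     removed afterwards, since deleting blocks while FOR_EACH_BB is
     still walking them would invalidate the iteration.  */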
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || has_abnormal_incoming_edge_p (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  while (current != worklist)
    {
      bb = *--current;
      remove_forwarder_block_with_phi (bb);
    }

  free (worklist);
  return 0;
}

static bool
gate_merge_phi (void)
{
  return true;
}

struct gimple_opt_pass pass_merge_phi =
{
 {
  GIMPLE_PASS,
  "mergephi",			/* name */
  gate_merge_phi,		/* gate */
  merge_phi_nodes,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_TREE_MERGE_PHI,		/* tv_id */
  PROP_cfg | PROP_ssa,		/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_dump_func | TODO_ggc_collect	/* todo_flags_finish */
  | TODO_verify_ssa
 }
};