1 /* CFG cleanup for trees.
2    Copyright (C) 2001-2022 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "cfganal.h"
33 #include "cfgcleanup.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
41 #include "cfgloop.h"
42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "cgraph.h"
47 #include "tree-into-ssa.h"
48 #include "tree-cfgcleanup.h"
49 
50 
51 /* The set of blocks in which at least one of the following changes happened:
52    -- the statement at the end of the block was changed
53    -- the block was newly created
54    -- the set of the predecessors of the block changed
55    -- the set of the successors of the block changed
56    ??? Maybe we could track these changes separately, since they determine
57        what cleanups it makes sense to try on the block.  */
58 bitmap cfgcleanup_altered_bbs;
59 
60 /* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
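/* A fallthru edge that is also complex (abnormal or EH) is not deleted here;
   only its EDGE_FALLTHRU flag is cleared, see below.  */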
61 
62 static bool
63 remove_fallthru_edge (vec<edge, va_gc> *ev)
64 {
65   edge_iterator ei;
66   edge e;
67 
68   FOR_EACH_EDGE (e, ei, ev)
69     if ((e->flags & EDGE_FALLTHRU) != 0)
70       {
71 	if (e->flags & EDGE_COMPLEX)
72 	  e->flags &= ~EDGE_FALLTHRU;
73 	else
74 	  remove_edge_and_dominated_blocks (e);
75 	return true;
76       }
77   return false;
78 }
79 
80 /* Convert SWTCH, a switch with a single non-default case, to a gcond and
81    replace it at GSI.  */
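/* For instance, with exactly two labels (the default plus one case) a switch
   like

     switch (i) { case 5 ... 7: goto L1; default: goto L2; }

   is rewritten into a range test, roughly (using unsigned arithmetic)
   if (i - 5 <= 2) goto L1; else goto L2;  while a single-value case simply
   becomes  if (i == 5) ...  */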
82 
83 static bool
84 convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
85 {
86   if (gimple_switch_num_labels (swtch) != 2)
87     return false;
88 
89   tree index = gimple_switch_index (swtch);
90   tree label = gimple_switch_label (swtch, 1);
91   tree low = CASE_LOW (label);
92   tree high = CASE_HIGH (label);
93 
94   basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
95   basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));
96 
97   basic_block bb = gimple_bb (swtch);
98   gcond *cond;
99 
100   /* Replace switch statement with condition statement.  */
101   if (high)
102     {
103       tree lhs, rhs;
104       if (range_check_type (TREE_TYPE (index)) == NULL_TREE)
105 	return false;
106       generate_range_test (bb, index, low, high, &lhs, &rhs);
107       cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
108     }
109   else
110     cond = gimple_build_cond (EQ_EXPR, index,
111 			      fold_convert (TREE_TYPE (index), low),
112 			      NULL_TREE, NULL_TREE);
113 
114   gsi_replace (&gsi, cond, true);
115 
116   /* Update edges.  */
117   edge case_edge = find_edge (bb, case_bb);
118   edge default_edge = find_edge (bb, default_bb);
119 
120   case_edge->flags |= EDGE_TRUE_VALUE;
121   default_edge->flags |= EDGE_FALSE_VALUE;
122   return true;
123 }
124 
125 /* Disconnect an unreachable block in the control expression starting
126    at block BB.  */
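/* If the condition or switch index ending BB folds to a constant, all
   outgoing edges except the statically taken one are removed (together with
   any blocks that thereby become unreachable), the control statement itself
   is deleted, and the surviving edge is marked as a plain fallthru.  */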
127 
128 static bool
129 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
130 {
131   edge taken_edge;
132   bool retval = false;
133   gimple *stmt = gsi_stmt (gsi);
134 
135   if (!single_succ_p (bb))
136     {
137       edge e;
138       edge_iterator ei;
139       bool warned;
140       tree val = NULL_TREE;
141 
142       /* Try to convert a switch with just a single non-default case to
143 	 GIMPLE condition.  */
144       if (gimple_code (stmt) == GIMPLE_SWITCH
145 	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
146 	stmt = gsi_stmt (gsi);
147 
148       fold_defer_overflow_warnings ();
149       switch (gimple_code (stmt))
150 	{
151 	case GIMPLE_COND:
152 	  {
153 	    gimple_match_op res_op;
154 	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
155 				 no_follow_ssa_edges)
156 		&& res_op.code == INTEGER_CST)
157 	      val = res_op.ops[0];
158 	  }
159 	  break;
160 
161 	case GIMPLE_SWITCH:
162 	  val = gimple_switch_index (as_a <gswitch *> (stmt));
163 	  break;
164 
165 	default:
166 	  ;
167 	}
168       taken_edge = find_taken_edge (bb, val);
169       if (!taken_edge)
170 	{
171 	  fold_undefer_and_ignore_overflow_warnings ();
172 	  return false;
173 	}
174 
175       /* Remove all the edges except the one that is always executed.  */
176       warned = false;
177       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
178 	{
179 	  if (e != taken_edge)
180 	    {
181 	      if (!warned)
182 		{
183 		  fold_undefer_overflow_warnings
184 		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
185 		  warned = true;
186 		}
187 
188 	      taken_edge->probability += e->probability;
189 	      remove_edge_and_dominated_blocks (e);
190 	      retval = true;
191 	    }
192 	  else
193 	    ei_next (&ei);
194 	}
195       if (!warned)
196 	fold_undefer_and_ignore_overflow_warnings ();
197     }
198   else
199     taken_edge = single_succ_edge (bb);
200 
201   bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
202   gsi_remove (&gsi, true);
203   taken_edge->flags = EDGE_FALLTHRU;
204 
205   return retval;
206 }
207 
208 /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
209    the updated gimple_call_flags.  */
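/* A const or pure call that cannot loop forever, or a leaf call, cannot
   transfer control anywhere unexpected, so the flag is simply dropped.
   Otherwise the flag is dropped only when the block has a fallthru
   successor and no abnormal successor.  */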
210 
211 static void
212 cleanup_call_ctrl_altering_flag (basic_block bb, gimple *bb_end)
213 {
214   if (!is_gimple_call (bb_end)
215       || !gimple_call_ctrl_altering_p (bb_end)
216       || (/* IFN_UNIQUE should be the last insn, to make checking for it
217 	     as cheap as possible.  */
218 	  gimple_call_internal_p (bb_end)
219 	  && gimple_call_internal_unique_p (bb_end)))
220     return;
221 
222   int flags = gimple_call_flags (bb_end);
223   if (((flags & (ECF_CONST | ECF_PURE))
224        && !(flags & ECF_LOOPING_CONST_OR_PURE))
225       || (flags & ECF_LEAF))
226     gimple_call_set_ctrl_altering (bb_end, false);
227   else
228     {
229       edge_iterator ei;
230       edge e;
231       bool found = false;
232       FOR_EACH_EDGE (e, ei, bb->succs)
233 	if (e->flags & EDGE_FALLTHRU)
234 	  found = true;
235 	else if (e->flags & EDGE_ABNORMAL)
236 	  {
237 	    found = false;
238 	    break;
239 	  }
240       /* If there's no abnormal edge and a fallthru edge, the call
241 	 isn't control-altering anymore.  */
242       if (found)
243 	gimple_call_set_ctrl_altering (bb_end, false);
244     }
245 }
246 
247 /* Try to remove superfluous control structures in basic block BB.  Returns
248    true if anything changes.  */
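/* This handles, in turn: pruning EH edges that can no longer be taken,
   GIMPLE_COND/GIMPLE_SWITCH statements whose condition folds to a constant,
   computed gotos whose destination label is known at compile time, and
   calls that have become noreturn.  */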
249 
250 static bool
251 cleanup_control_flow_bb (basic_block bb)
252 {
253   gimple_stmt_iterator gsi;
254   bool retval = false;
255   gimple *stmt;
256 
257   /* If the last statement of the block could throw and now cannot,
258      we need to prune cfg.  */
259   retval |= gimple_purge_dead_eh_edges (bb);
260 
261   gsi = gsi_last_nondebug_bb (bb);
262   if (gsi_end_p (gsi))
263     return retval;
264 
265   stmt = gsi_stmt (gsi);
266 
267   /* Try to cleanup ctrl altering flag for call which ends bb.  */
268   cleanup_call_ctrl_altering_flag (bb, stmt);
269 
270   if (gimple_code (stmt) == GIMPLE_COND
271       || gimple_code (stmt) == GIMPLE_SWITCH)
272     {
273       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
274       retval |= cleanup_control_expr_graph (bb, gsi);
275     }
276   else if (gimple_code (stmt) == GIMPLE_GOTO
277 	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
278 	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
279 	       == LABEL_DECL))
280     {
281       /* If we had a computed goto which has a compile-time determinable
282 	 destination, then we can eliminate the goto.  */
283       edge e;
284       tree label;
285       edge_iterator ei;
286       basic_block target_block;
287 
288       gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
289       /* First look at all the outgoing edges.  Delete any outgoing
290 	 edges which do not go to the right block.  For the one
291 	 edge which goes to the right block, fix up its flags.  */
292       label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
293       if (DECL_CONTEXT (label) != cfun->decl)
294 	return retval;
295       target_block = label_to_block (cfun, label);
296       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
297 	{
298 	  if (e->dest != target_block)
299 	    remove_edge_and_dominated_blocks (e);
300 	  else
301 	    {
302 	      /* Turn off the EDGE_ABNORMAL flag.  */
303 	      e->flags &= ~EDGE_ABNORMAL;
304 
305 	      /* And set EDGE_FALLTHRU.  */
306 	      e->flags |= EDGE_FALLTHRU;
307 	      ei_next (&ei);
308 	    }
309 	}
310 
311       bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
312       bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
313 
314       /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
315 	 relevant information we need.  */
316       gsi_remove (&gsi, true);
317       retval = true;
318     }
319 
320   /* Check for indirect calls that have been turned into
321      noreturn calls.  */
322   else if (is_gimple_call (stmt)
323 	   && gimple_call_noreturn_p (stmt))
324     {
325       /* If there are debug stmts after the noreturn call, remove them
326 	 now, they should be all unreachable anyway.  */
327       for (gsi_next (&gsi); !gsi_end_p (gsi); )
328 	gsi_remove (&gsi, true);
329       if (remove_fallthru_edge (bb->succs))
330 	retval = true;
331     }
332 
333   return retval;
334 }
335 
336 /* Return true if basic block BB does nothing except pass control
337    flow to another block, and we can safely insert a label at
338    the start of the successor block.
339 
340    As a precondition, we require that BB be not equal to
341    the entry block.  */
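/* In practice a forwarder block may contain only labels and debug statements
   (and, when PHI_WANTED, PHI nodes), must have exactly one non-abnormal
   successor that is neither itself nor the exit block, and, when loop
   information is present, must not be a loop header; the checks below also
   keep loop preheaders and latches intact when the loop state requires
   them.  */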
342 
343 static bool
344 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
345 {
346   gimple_stmt_iterator gsi;
347   location_t locus;
348 
349   /* BB must have a single outgoing edge.  */
350   if (single_succ_p (bb) != 1
351       /* If PHI_WANTED is false, BB must not have any PHI nodes.
352 	 Otherwise, BB must have PHI nodes.  */
353       || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
354       /* BB may not be a predecessor of the exit block.  */
355       || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
356       /* Nor should this be an infinite loop.  */
357       || single_succ (bb) == bb
358       /* BB may not have an abnormal outgoing edge.  */
359       || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
360     return false;
361 
362   gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
363 
364   locus = single_succ_edge (bb)->goto_locus;
365 
366   /* There should not be an edge coming from entry, or an EH edge.  */
367   {
368     edge_iterator ei;
369     edge e;
370 
371     FOR_EACH_EDGE (e, ei, bb->preds)
372       if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
373 	return false;
374       /* If goto_locus of any of the edges differs, prevent removing
375 	 the forwarder block when not optimizing.  */
376       else if (!optimize
377 	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
378 		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
379 	       && e->goto_locus != locus)
380 	return false;
381   }
382 
383   /* Now walk through the statements backward.  We can ignore labels,
384      anything else means this is not a forwarder block.  */
385   for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
386     {
387       gimple *stmt = gsi_stmt (gsi);
388 
389       switch (gimple_code (stmt))
390 	{
391 	case GIMPLE_LABEL:
392 	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
393 	    return false;
394 	  if (!optimize
395 	      && (gimple_has_location (stmt)
396 		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
397 	      && gimple_location (stmt) != locus)
398 	    return false;
399 	  break;
400 
401 	  /* ??? For now, hope there's a corresponding debug
402 	     assignment at the destination.  */
403 	case GIMPLE_DEBUG:
404 	  break;
405 
406 	default:
407 	  return false;
408 	}
409     }
410 
411   if (current_loops)
412     {
413       basic_block dest;
414       /* Protect loop headers.  */
415       if (bb_loop_header_p (bb))
416 	return false;
417 
418       dest = EDGE_SUCC (bb, 0)->dest;
419       /* Protect loop preheaders and latches if requested.  */
420       if (dest->loop_father->header == dest)
421 	{
422 	  if (bb->loop_father == dest->loop_father)
423 	    {
424 	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
425 		return false;
426 	      /* If bb doesn't have a single predecessor we'd make this
427 		 loop have multiple latches.  Don't do that if that
428 		 would in turn require disambiguating them.  */
429 	      return (single_pred_p (bb)
430 		      || loops_state_satisfies_p
431 		      	   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
432 	    }
433 	  else if (bb->loop_father == loop_outer (dest->loop_father))
434 	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
435 	  /* Always preserve other edges into loop headers that are
436 	     not simple latches or preheaders.  */
437 	  return false;
438 	}
439     }
440 
441   return true;
442 }
443 
444 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
445    those alternatives are equal in each of the PHI nodes, then return
446    true, else return false.  */
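/* For example, with  # x_1 = PHI <a_2(E1), a_2(E2), b_3(E3)>  the E1 and E2
   alternatives are equal, so redirecting one of the two edges onto the other
   does not change the value merged by the PHI node.  */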
447 
448 static bool
449 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
450 {
451   int n1 = e1->dest_idx;
452   int n2 = e2->dest_idx;
453   gphi_iterator gsi;
454 
455   for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
456     {
457       gphi *phi = gsi.phi ();
458       tree val1 = gimple_phi_arg_def (phi, n1);
459       tree val2 = gimple_phi_arg_def (phi, n2);
460 
461       gcc_assert (val1 != NULL_TREE);
462       gcc_assert (val2 != NULL_TREE);
463 
464       if (!operand_equal_for_phi_arg_p (val1, val2))
465 	return false;
466     }
467 
468   return true;
469 }
470 
471 /* Move debug stmts from the forwarder block SRC to DEST.  */
472 
473 static void
474 move_debug_stmts_from_forwarder (basic_block src, basic_block dest,
475 				 bool dest_single_pred_p)
476 {
477   if (!MAY_HAVE_DEBUG_STMTS)
478     return;
479 
480   gimple_stmt_iterator gsi_to = gsi_after_labels (dest);
481   for (gimple_stmt_iterator gsi = gsi_after_labels (src); !gsi_end_p (gsi);)
482     {
483       gimple *debug = gsi_stmt (gsi);
484       gcc_assert (is_gimple_debug (debug));
485       /* Move debug binds anyway, but not anything else like begin-stmt
486 	 markers unless they are always valid at the destination.  */
487       if (dest_single_pred_p
488 	  || gimple_debug_bind_p (debug))
489 	{
490 	  gsi_move_before (&gsi, &gsi_to);
491 	  /* Reset debug-binds that are not always valid at the destination.
492 	     Simply dropping them can cause earlier values to become live,
493 	     generating wrong debug information.
494 	     ???  There are several things we could improve here.  For
495 	     one we might be able to move stmts to the predecessor.
496 	     For another, if the debug stmt is immediately followed by a
497 	     (debug) definition in the destination (on a post-dominated path?)
498 	     we can elide it without any bad effects.  */
499 	  if (!dest_single_pred_p)
500 	    {
501 	      gimple_debug_bind_reset_value (debug);
502 	      update_stmt (debug);
503 	    }
504 	}
505       else
506 	gsi_next (&gsi);
507     }
508 }
509 
510 /* Removes forwarder block BB.  Returns false if this failed.  */
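/* The work consists of redirecting all predecessor edges of BB to its single
   successor DEST, moving the labels and debug statements that must be
   preserved into DEST, updating dominator and loop latch information, and
   finally deleting BB.  */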
511 
512 static bool
513 remove_forwarder_block (basic_block bb)
514 {
515   edge succ = single_succ_edge (bb), e, s;
516   basic_block dest = succ->dest;
517   gimple *stmt;
518   edge_iterator ei;
519   gimple_stmt_iterator gsi, gsi_to;
520 
521   /* We check for infinite loops already in tree_forwarder_block_p.
522      However it may happen that the infinite loop is created
523      afterwards due to removal of forwarders.  */
524   if (dest == bb)
525     return false;
526 
527   /* If the destination block consists of a nonlocal label or is an
528      EH landing pad, do not merge it.  */
529   stmt = first_stmt (dest);
530   if (stmt)
531     if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
532       if (DECL_NONLOCAL (gimple_label_label (label_stmt))
533 	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
534 	return false;
535 
536   /* If there is an abnormal edge to basic block BB, but not into
537      dest, problems might occur during removal of the phi node when
538      going out of SSA, due to overlapping live ranges of registers.
539 
540      If there is an abnormal edge in DEST, the problems would occur
541      anyway since cleanup_dead_labels would then merge the labels for
542      two different eh regions, and rest of exception handling code
543      does not like it.
544 
545      So if there is an abnormal edge to BB, proceed only if there is
546      no abnormal edge to DEST and there are no phi nodes in DEST.  */
547   if (bb_has_abnormal_pred (bb)
548       && (bb_has_abnormal_pred (dest)
549 	  || !gimple_seq_empty_p (phi_nodes (dest))))
550     return false;
551 
552   /* If there are phi nodes in DEST, and some of the blocks that are
553      predecessors of BB are also predecessors of DEST, check that the
554      phi node arguments match.  */
555   if (!gimple_seq_empty_p (phi_nodes (dest)))
556     {
557       FOR_EACH_EDGE (e, ei, bb->preds)
558 	{
559 	  s = find_edge (e->src, dest);
560 	  if (!s)
561 	    continue;
562 
563 	  if (!phi_alternatives_equal (dest, succ, s))
564 	    return false;
565 	}
566     }
567 
568   basic_block pred = NULL;
569   if (single_pred_p (bb))
570     pred = single_pred (bb);
571   bool dest_single_pred_p = single_pred_p (dest);
572 
573   /* Redirect the edges.  */
574   for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
575     {
576       bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
577 
578       if (e->flags & EDGE_ABNORMAL)
579 	{
580 	  /* If there is an abnormal edge, redirect it anyway, and
581 	     move the labels to the new block to make it legal.  */
582 	  s = redirect_edge_succ_nodup (e, dest);
583 	}
584       else
585 	s = redirect_edge_and_branch (e, dest);
586 
587       if (s == e)
588 	{
589 	  /* Create arguments for the phi nodes, since the edge was not
590 	     here before.  */
591 	  for (gphi_iterator psi = gsi_start_phis (dest);
592 	       !gsi_end_p (psi);
593 	       gsi_next (&psi))
594 	    {
595 	      gphi *phi = psi.phi ();
596 	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
597 	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
598 	      add_phi_arg (phi, unshare_expr (def), s, l);
599 	    }
600 	}
601     }
602 
603   /* Move nonlocal labels and computed goto targets as well as user
604      defined labels and labels with an EH landing pad number to the
605      new block, so that the redirection of the abnormal edges works,
606      jump targets end up in a sane place and debug information for
607      labels is retained.  */
608   gsi_to = gsi_start_bb (dest);
609   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
610     {
611       stmt = gsi_stmt (gsi);
612       if (is_gimple_debug (stmt))
613 	break;
614 
615       /* Forwarder blocks can only contain labels and debug stmts, and
616 	 labels must come first, so if we get to this point, we know
617 	 we're looking at a label.  */
618       tree decl = gimple_label_label (as_a <glabel *> (stmt));
619       if (EH_LANDING_PAD_NR (decl) != 0
620 	  || DECL_NONLOCAL (decl)
621 	  || FORCED_LABEL (decl)
622 	  || !DECL_ARTIFICIAL (decl))
623 	gsi_move_before (&gsi, &gsi_to);
624       else
625 	gsi_next (&gsi);
626     }
627 
628   /* Move debug statements.  Reset them if the destination does not
629      have a single predecessor.  */
630   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
631 
632   bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
633 
634   /* Update the dominators.  */
635   if (dom_info_available_p (CDI_DOMINATORS))
636     {
637       basic_block dom, dombb, domdest;
638 
639       dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
640       domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
641       if (domdest == bb)
642 	{
643 	  /* Shortcut to avoid calling (relatively expensive)
644 	     nearest_common_dominator unless necessary.  */
645 	  dom = dombb;
646 	}
647       else
648 	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
649 
650       set_immediate_dominator (CDI_DOMINATORS, dest, dom);
651     }
652 
653   /* Adjust latch information of BB's parent loop as otherwise
654      the cfg hook has a hard time not to kill the loop.  */
655   if (current_loops && bb->loop_father->latch == bb)
656     bb->loop_father->latch = pred;
657 
658   /* And kill the forwarder block.  */
659   delete_basic_block (bb);
660 
661   return true;
662 }
663 
664 /* STMT is a call that has been discovered noreturn.  Split the
665    block to prepare fixing up the CFG and remove LHS.
666    Return true if cleanup-cfg needs to run.  */
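/* Splitting makes the noreturn call the last real statement of its block, so
   the CFG fixup can later remove the now-impossible outgoing edges; a dropped
   SSA LHS is turned into the default definition of a fresh temporary so that
   SSA verification remains happy.  */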
667 
668 bool
669 fixup_noreturn_call (gimple *stmt)
670 {
671   basic_block bb = gimple_bb (stmt);
672   bool changed = false;
673 
674   if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
675     return false;
676 
677   /* First split basic block if stmt is not last.  */
678   if (stmt != gsi_stmt (gsi_last_bb (bb)))
679     {
680       if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
681 	{
682 	  /* Don't split if there are only debug stmts
683 	     after stmt, that can result in -fcompare-debug
684 	     failures.  Remove the debug stmts instead,
685 	     they should be all unreachable anyway.  */
686 	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
687 	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
688 	    gsi_remove (&gsi, true);
689 	}
690       else
691 	{
692 	  split_block (bb, stmt);
693 	  changed = true;
694 	}
695     }
696 
697   /* If there is an LHS, remove it, but only if its type has fixed size.
698      The LHS will need to be recreated during RTL expansion and creating
699      temporaries of variable-sized types is not supported.  Also don't
700      do this with TREE_ADDRESSABLE types, as assign_temp will abort.
701      Drop LHS regardless of TREE_ADDRESSABLE, if the function call
702      has been changed into a call that does not return a value, like
703      __builtin_unreachable or __cxa_pure_virtual.  */
704   tree lhs = gimple_call_lhs (stmt);
705   if (lhs
706       && (should_remove_lhs_p (lhs)
707 	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
708     {
709       gimple_call_set_lhs (stmt, NULL_TREE);
710 
711       /* We need to fix up the SSA name to avoid checking errors.  */
712       if (TREE_CODE (lhs) == SSA_NAME)
713 	{
714 	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
715 	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
716 	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
717 	  set_ssa_default_def (cfun, new_var, lhs);
718 	}
719 
720       update_stmt (stmt);
721     }
722 
723   /* Mark the call as altering control flow.  */
724   if (!gimple_call_ctrl_altering_p (stmt))
725     {
726       gimple_call_set_ctrl_altering (stmt, true);
727       changed = true;
728     }
729 
730   return changed;
731 }
732 
733 /* Return true if we want to merge BB1 and BB2 into a single block.  */
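/* Merging is always desirable when BB1 cannot cut the block short (its last
   statement cannot terminate the block, for instance by throwing); otherwise
   it is done only when the profile counts of the two blocks are consistent
   enough (see ok_for_merging).  */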
734 
735 static bool
736 want_merge_blocks_p (basic_block bb1, basic_block bb2)
737 {
738   if (!can_merge_blocks_p (bb1, bb2))
739     return false;
740   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
741   if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
742     return true;
743   return bb1->count.ok_for_merging (bb2->count);
744 }
745 
746 
747 /* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
748    true if anything changes.  */
749 
750 static bool
751 cleanup_tree_cfg_bb (basic_block bb)
752 {
753   if (tree_forwarder_block_p (bb, false)
754       && remove_forwarder_block (bb))
755     return true;
756 
757   /* If there is a merge opportunity with the predecessor
758      do nothing now but wait until we process the predecessor.
759      This happens when we visit BBs in a non-optimal order and
760      avoids quadratic behavior with adjusting stmts BB pointer.  */
761   if (single_pred_p (bb)
762       && want_merge_blocks_p (single_pred (bb), bb))
763     /* But make sure we _do_ visit it.  When we remove unreachable paths
764        ending in a backedge we fail to mark the destinations predecessors
765        as changed.  */
766     bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
767 
768   /* Merging the blocks may create new opportunities for folding
769      conditional branches (due to the elimination of single-valued PHI
770      nodes).  */
771   else if (single_succ_p (bb)
772 	   && want_merge_blocks_p (bb, single_succ (bb)))
773     {
774       merge_blocks (bb, single_succ (bb));
775       return true;
776     }
777 
778   return false;
779 }
780 
781 /* Return true if E is an EDGE_ABNORMAL edge for returns_twice calls,
782    i.e. one going from .ABNORMAL_DISPATCHER to a basic block which doesn't
783    start with a forced or nonlocal label.  Calls which return twice can return
784    the second time only if they are called normally the first time, so basic
785    blocks which can be only entered through these abnormal edges but not
786    normally are effectively unreachable as well.  Additionally ignore
787    __builtin_setjmp_receiver starting blocks, which have one FORCED_LABEL
788    and which are always only reachable through an EDGE_ABNORMAL edge.  They are
789    handled in cleanup_control_flow_pre.  */
790 
791 static bool
792 maybe_dead_abnormal_edge_p (edge e)
793 {
794   if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL)
795     return false;
796 
797   gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (e->src);
798   gimple *g = gsi_stmt (gsi);
799   if (!g || !gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
800     return false;
801 
802   tree target = NULL_TREE;
803   for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
804     if (glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
805       {
806 	tree this_target = gimple_label_label (label_stmt);
807 	if (DECL_NONLOCAL (this_target))
808 	  return false;
809 	if (FORCED_LABEL (this_target))
810 	  {
811 	    if (target)
812 	      return false;
813 	    target = this_target;
814 	  }
815       }
816     else
817       break;
818 
819   if (target)
820     {
821       /* If there was a single FORCED_LABEL, check for
822 	 __builtin_setjmp_receiver with address of that label.  */
823       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
824 	gsi_next_nondebug (&gsi);
825       if (gsi_end_p (gsi))
826 	return false;
827       if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_RECEIVER))
828 	return false;
829 
830       tree arg = gimple_call_arg (gsi_stmt (gsi), 0);
831       if (TREE_CODE (arg) != ADDR_EXPR || TREE_OPERAND (arg, 0) != target)
832 	return false;
833     }
834   return true;
835 }
836 
837 /* If BB is a basic block ending with __builtin_setjmp_setup, return edge
838    from .ABNORMAL_DISPATCHER basic block to corresponding
839    __builtin_setjmp_receiver basic block, otherwise return NULL.  */
840 static edge
841 builtin_setjmp_setup_bb (basic_block bb)
842 {
843   if (EDGE_COUNT (bb->succs) != 2
844       || ((EDGE_SUCC (bb, 0)->flags
845 	   & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
846 	  && (EDGE_SUCC (bb, 1)->flags
847 	      & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL))
848     return NULL;
849 
850   gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
851   if (gsi_end_p (gsi)
852       || !gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_SETUP))
853     return NULL;
854 
855   tree arg = gimple_call_arg (gsi_stmt (gsi), 1);
856   if (TREE_CODE (arg) != ADDR_EXPR
857       || TREE_CODE (TREE_OPERAND (arg, 0)) != LABEL_DECL)
858     return NULL;
859 
860   basic_block recv_bb = label_to_block (cfun, TREE_OPERAND (arg, 0));
861   if (EDGE_COUNT (recv_bb->preds) != 1
862       || (EDGE_PRED (recv_bb, 0)->flags
863 	  & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
864       || (EDGE_SUCC (bb, 0)->dest != EDGE_PRED (recv_bb, 0)->src
865 	  && EDGE_SUCC (bb, 1)->dest != EDGE_PRED (recv_bb, 0)->src))
866     return NULL;
867 
868   /* EDGE_PRED (recv_bb, 0)->src should be the .ABNORMAL_DISPATCHER bb.  */
869   return EDGE_PRED (recv_bb, 0);
870 }
871 
872 /* Do cleanup_control_flow_bb in PRE order.  */
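/* The CFG is walked depth-first from the entry block; each block reachable
   over still-live edges is cleaned exactly once, when it is first visited.
   Blocks never reached that way, including blocks only reachable through
   abnormal edges judged dead by maybe_dead_abnormal_edge_p, stay unmarked in
   the visited bitmap and are deleted at the end.  */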
873 
874 static bool
875 cleanup_control_flow_pre ()
876 {
877   bool retval = false;
878 
879   /* We want remove_edge_and_dominated_blocks to only remove edges,
880      not dominated blocks which it does when dom info isn't available.
881      Pretend so.  */
882   dom_state saved_state = dom_info_state (CDI_DOMINATORS);
883   set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);
884 
885   auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
886   auto_sbitmap visited (last_basic_block_for_fn (cfun));
887   bitmap_clear (visited);
888 
889   vec<edge, va_gc> *setjmp_vec = NULL;
890   auto_vec<basic_block, 4> abnormal_dispatchers;
891 
892   stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));
893 
894   while (! stack.is_empty ())
895     {
896       /* Look at the edge on the top of the stack.  */
897       edge_iterator ei = stack.last ();
898       basic_block dest = ei_edge (ei)->dest;
899 
900       if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
901 	  && !bitmap_bit_p (visited, dest->index)
902 	  && (ei_container (ei) == setjmp_vec
903 	      || !maybe_dead_abnormal_edge_p (ei_edge (ei))))
904 	{
905 	  bitmap_set_bit (visited, dest->index);
906 	  /* We only possibly remove edges from DEST here, leaving
907 	     possibly unreachable code in the IL.  */
908 	  retval |= cleanup_control_flow_bb (dest);
909 
910 	  /* Check for __builtin_setjmp_setup.  Edges from .ABNORMAL_DISPATCHER
911 	     to __builtin_setjmp_receiver will be normally ignored by
912 	     maybe_dead_abnormal_edge_p.  If DEST is a visited
913 	     __builtin_setjmp_setup, queue edge from .ABNORMAL_DISPATCHER
914 	     to __builtin_setjmp_receiver, so that it will be visited too.  */
915 	  if (edge e = builtin_setjmp_setup_bb (dest))
916 	    {
917 	      vec_safe_push (setjmp_vec, e);
918 	      if (vec_safe_length (setjmp_vec) == 1)
919 		stack.quick_push (ei_start (setjmp_vec));
920 	    }
921 
922 	  if ((ei_edge (ei)->flags
923 	       & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
924 	    {
925 	      gimple_stmt_iterator gsi
926 		= gsi_start_nondebug_after_labels_bb (dest);
927 	      gimple *g = gsi_stmt (gsi);
928 	      if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
929 		abnormal_dispatchers.safe_push (dest);
930 	    }
931 
932 	  if (EDGE_COUNT (dest->succs) > 0)
933 	    stack.quick_push (ei_start (dest->succs));
934 	}
935       else
936 	{
937 	  if (!ei_one_before_end_p (ei))
938 	    ei_next (&stack.last ());
939 	  else
940 	    {
941 	      if (ei_container (ei) == setjmp_vec)
942 		vec_safe_truncate (setjmp_vec, 0);
943 	      stack.pop ();
944 	    }
945 	}
946     }
947 
948   vec_free (setjmp_vec);
949 
950   /* If we've marked .ABNORMAL_DISPATCHER basic block(s) as visited
951      above, but haven't marked any of their successors as visited,
952      unmark them now, so that they can be removed as useless.  */
953   for (basic_block dispatcher_bb : abnormal_dispatchers)
954     {
955       edge e;
956       edge_iterator ei;
957       FOR_EACH_EDGE (e, ei, dispatcher_bb->succs)
958 	if (bitmap_bit_p (visited, e->dest->index))
959 	  break;
960       if (e == NULL)
961 	bitmap_clear_bit (visited, dispatcher_bb->index);
962     }
963 
964   set_dom_info_availability (CDI_DOMINATORS, saved_state);
965 
966   /* We are deleting BBs in non-reverse dominator order, make sure
967      insert_debug_temps_for_defs is prepared for that.  */
968   if (retval)
969     free_dominance_info (CDI_DOMINATORS);
970 
971   /* Remove all now (and previously) unreachable blocks.  */
972   for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
973     {
974       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
975       if (bb && !bitmap_bit_p (visited, bb->index))
976 	{
977 	  if (!retval)
978 	    free_dominance_info (CDI_DOMINATORS);
979 	  delete_basic_block (bb);
980 	  retval = true;
981 	}
982     }
983 
984   return retval;
985 }
986 
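/* Edge predicate used via make_forwarder_block when splitting loop headers
   with multiple entries below: return true unless E is a latch edge, i.e.
   unless E's source is dominated by E's destination or E is marked as a DFS
   back edge.  */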
987 static bool
988 mfb_keep_latches (edge e)
989 {
990   return !((dom_info_available_p (CDI_DOMINATORS)
991 	    && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
992 	   || (e->flags & EDGE_DFS_BACK));
993 }
994 
995 /* Remove unreachable blocks and other miscellaneous clean up work.
996    Return true if the flowgraph was modified, false otherwise.  */
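/* The work is staged: first, loop headers with more than one non-latch entry
   get a forwarder block so existing loops keep a single entry; then a
   PRE-order walk (cleanup_control_flow_pre) removes dead edges and
   unreachable blocks; SSA form is updated if requested; dominator info is
   computed (or verified); and finally forwarder removal and block merging
   iterate over the blocks recorded in cfgcleanup_altered_bbs until nothing
   changes.  */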
997 
998 static bool
999 cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
1000 {
1001   timevar_push (TV_TREE_CLEANUP_CFG);
1002 
1003   /* Ensure that we have single entries into loop headers.  Otherwise
1004      if one of the entries is becoming a latch due to CFG cleanup
1005      (from formerly being part of an irreducible region) then we mess
1006      up loop fixup and associate the old loop with a different region
1007      which makes niter upper bounds invalid.  See for example PR80549.
1008      This needs to be done before we remove trivially dead edges as
1009      we need to capture the dominance state before the pending transform.  */
1010   if (current_loops)
1011     {
1012       /* This needs backedges or dominators.  */
1013       if (!dom_info_available_p (CDI_DOMINATORS))
1014 	mark_dfs_back_edges ();
1015 
1016       for (loop_p loop : *get_loops (cfun))
1017 	if (loop && loop->header)
1018 	  {
1019 	    basic_block bb = loop->header;
1020 	    edge_iterator ei;
1021 	    edge e;
1022 	    bool found_latch = false;
1023 	    bool any_abnormal = false;
1024 	    unsigned n = 0;
1025 	    /* We are only interested in preserving existing loops, but
1026 	       we need to check whether they are still real and of course
1027 	       if we need to add a preheader at all.  */
1028 	    FOR_EACH_EDGE (e, ei, bb->preds)
1029 	      {
1030 		if (e->flags & EDGE_ABNORMAL)
1031 		  {
1032 		    any_abnormal = true;
1033 		    break;
1034 		  }
1035 		if ((dom_info_available_p (CDI_DOMINATORS)
1036 		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
1037 		    || (e->flags & EDGE_DFS_BACK))
1038 		  {
1039 		    found_latch = true;
1040 		    continue;
1041 		  }
1042 		n++;
1043 	      }
1044 	    /* If we have more than one entry to the loop header
1045 	       create a forwarder.  */
1046 	    if (found_latch && ! any_abnormal && n > 1)
1047 	      {
1048 		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
1049 						      NULL);
1050 		loop->header = fallthru->dest;
1051 		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1052 		  {
1053 		    /* The loop updating from the CFG hook is incomplete
1054 		       when we have multiple latches, fixup manually.  */
1055 		    remove_bb_from_loops (fallthru->src);
1056 		    loop_p cloop = loop;
1057 		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
1058 		      cloop = find_common_loop (cloop, e->src->loop_father);
1059 		    add_bb_to_loop (fallthru->src, cloop);
1060 		  }
1061 	      }
1062 	  }
1063     }
1064 
1065   /* Prepare the worklists of altered blocks.  */
1066   cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
1067 
1068   /* Start by iterating over all basic blocks in PRE order looking for
1069      edge removal opportunities.  Do this first because incoming SSA form
1070      may be invalid and we want to avoid performing SSA related tasks such
1071      as propagating out a PHI node during BB merging in that state.  This
1072      also gets rid of unreachable blocks.  */
1073   bool changed = cleanup_control_flow_pre ();
1074 
1075   /* After doing the above SSA form should be valid (or an update SSA
1076      should be required).  */
1077   if (ssa_update_flags)
1078     update_ssa (ssa_update_flags);
1079 
1080   /* Compute dominator info which we need for the iterative process below.  */
1081   if (!dom_info_available_p (CDI_DOMINATORS))
1082     calculate_dominance_info (CDI_DOMINATORS);
1083   else
1084     checking_verify_dominators (CDI_DOMINATORS);
1085 
1086   /* During forwarder block cleanup, we may redirect edges out of
1087      SWITCH_EXPRs, which can get expensive.  So we want to enable
1088      recording of edge to CASE_LABEL_EXPR.  */
1089   start_recording_case_labels ();
1090 
1091   /* Continue by iterating over all basic blocks looking for BB merging
1092      opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
1093      since the basic blocks may get removed.  */
1094   unsigned n = last_basic_block_for_fn (cfun);
1095   for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
1096     {
1097       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1098       if (bb)
1099 	changed |= cleanup_tree_cfg_bb (bb);
1100     }
1101 
1102   /* Now process the altered blocks, as long as any are available.  */
1103   while (!bitmap_empty_p (cfgcleanup_altered_bbs))
1104     {
1105       unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
1106       bitmap_clear_bit (cfgcleanup_altered_bbs, i);
1107       if (i < NUM_FIXED_BLOCKS)
1108 	continue;
1109 
1110       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1111       if (!bb)
1112 	continue;
1113 
1114       /* BB merging done by cleanup_tree_cfg_bb can end up propagating
1115 	 out single-argument PHIs which in turn can expose
1116 	 cleanup_control_flow_bb opportunities so we have to repeat
1117 	 that here.  */
1118       changed |= cleanup_control_flow_bb (bb);
1119       changed |= cleanup_tree_cfg_bb (bb);
1120     }
1121 
1122   end_recording_case_labels ();
1123   BITMAP_FREE (cfgcleanup_altered_bbs);
1124 
1125   gcc_assert (dom_info_available_p (CDI_DOMINATORS));
1126 
1127   /* Do not renumber blocks if the SCEV cache is active, it is indexed by
1128      basic-block numbers.  */
1129   if (! scev_initialized_p ())
1130     compact_blocks ();
1131 
1132   checking_verify_flow_info ();
1133 
1134   timevar_pop (TV_TREE_CLEANUP_CFG);
1135 
1136   if (changed && current_loops)
1137     {
1138       /* Removing edges and/or blocks may make recorded bounds refer
1139          to stale GIMPLE stmts now, so clear them.  */
1140       free_numbers_of_iterations_estimates (cfun);
1141       loops_state_set (LOOPS_NEED_FIXUP);
1142     }
1143 
1144   return changed;
1145 }
1146 
1147 /* Repairs loop structures.  */
1148 
1149 static void
1150 repair_loop_structures (void)
1151 {
1152   bitmap changed_bbs;
1153   unsigned n_new_loops;
1154 
1155   calculate_dominance_info (CDI_DOMINATORS);
1156 
1157   timevar_push (TV_REPAIR_LOOPS);
1158   changed_bbs = BITMAP_ALLOC (NULL);
1159   n_new_loops = fix_loop_structure (changed_bbs);
1160 
1161   /* This usually does nothing.  But sometimes parts of cfg that originally
1162      were inside a loop get out of it due to edge removal (since they
1163      become unreachable by back edges from latch).  Also a former
1164      irreducible loop can become reducible - in this case force a full
1165      rewrite into loop-closed SSA form.  */
1166   if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1167     rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
1168 				  TODO_update_ssa);
1169 
1170   BITMAP_FREE (changed_bbs);
1171 
1172   checking_verify_loop_structure ();
1173   scev_reset ();
1174 
1175   timevar_pop (TV_REPAIR_LOOPS);
1176 }
1177 
1178 /* Cleanup cfg and repair loop structures.  */
1179 
1180 bool
1181 cleanup_tree_cfg (unsigned ssa_update_flags)
1182 {
1183   bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);
1184 
1185   if (current_loops != NULL
1186       && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1187     repair_loop_structures ();
1188 
1189   return changed;
1190 }
1191 
1192 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
1193    Returns true if successful.  */
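/* Unlike remove_forwarder_block above, BB is allowed to contain PHI nodes;
   their arguments are folded into the corresponding PHI nodes of the
   successor as the incoming edges are redirected.  */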
1194 
1195 static bool
1196 remove_forwarder_block_with_phi (basic_block bb)
1197 {
1198   edge succ = single_succ_edge (bb);
1199   basic_block dest = succ->dest;
1200   gimple *label;
1201   basic_block dombb, domdest, dom;
1202 
1203   /* We check for infinite loops already in tree_forwarder_block_p.
1204      However it may happen that the infinite loop is created
1205      afterwards due to removal of forwarders.  */
1206   if (dest == bb)
1207     return false;
1208 
1209   /* Removal of forwarders may expose new natural loops and thus
1210      a block may turn into a loop header.  */
1211   if (current_loops && bb_loop_header_p (bb))
1212     return false;
1213 
1214   /* If the destination block consists of a nonlocal label, do not
1215      merge it.  */
1216   label = first_stmt (dest);
1217   if (label)
1218     if (glabel *label_stmt = dyn_cast <glabel *> (label))
1219       if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1220 	return false;
1221 
1222   /* Record BB's single pred in case we need to update the father
1223      loop's latch information later.  */
1224   basic_block pred = NULL;
1225   if (single_pred_p (bb))
1226     pred = single_pred (bb);
1227   bool dest_single_pred_p = single_pred_p (dest);
1228 
1229   /* Redirect each incoming edge to BB to DEST.  */
1230   while (EDGE_COUNT (bb->preds) > 0)
1231     {
1232       edge e = EDGE_PRED (bb, 0), s;
1233       gphi_iterator gsi;
1234 
1235       s = find_edge (e->src, dest);
1236       if (s)
1237 	{
1238 	  /* We already have an edge S from E->src to DEST.  If S and
1239 	     E->dest's sole successor edge have the same PHI arguments
1240 	     at DEST, redirect S to DEST.  */
1241 	  if (phi_alternatives_equal (dest, s, succ))
1242 	    {
1243 	      e = redirect_edge_and_branch (e, dest);
1244 	      redirect_edge_var_map_clear (e);
1245 	      continue;
1246 	    }
1247 
1248 	  /* PHI arguments are different.  Create a forwarder block by
1249 	     splitting E so that we can merge PHI arguments on E to
1250 	     DEST.  */
1251 	  e = single_succ_edge (split_edge (e));
1252 	}
1253       else
1254 	{
1255 	  /* If we merge the forwarder into a loop header verify if we
1256 	     are creating another loop latch edge.  If so, reset
1257 	     number of iteration information of the loop.  */
1258 	  if (dest->loop_father->header == dest
1259 	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
1260 	    {
1261 	      dest->loop_father->any_upper_bound = false;
1262 	      dest->loop_father->any_likely_upper_bound = false;
1263 	      free_numbers_of_iterations_estimates (dest->loop_father);
1264 	    }
1265 	}
1266 
1267       s = redirect_edge_and_branch (e, dest);
1268 
1269       /* redirect_edge_and_branch must not create a new edge.  */
1270       gcc_assert (s == e);
1271 
1272       /* Add to the PHI nodes at DEST each PHI argument removed at the
1273 	 destination of E.  */
1274       for (gsi = gsi_start_phis (dest);
1275 	   !gsi_end_p (gsi);
1276 	   gsi_next (&gsi))
1277 	{
1278 	  gphi *phi = gsi.phi ();
1279 	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
1280 	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);
1281 
1282 	  if (TREE_CODE (def) == SSA_NAME)
1283 	    {
1284 	      /* If DEF is one of the results of PHI nodes removed during
1285 		 redirection, replace it with the PHI argument that used
1286 		 to be on E.  */
1287 	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
1288 	      size_t length = head ? head->length () : 0;
1289 	      for (size_t i = 0; i < length; i++)
1290 		{
1291 		  edge_var_map *vm = &(*head)[i];
1292 		  tree old_arg = redirect_edge_var_map_result (vm);
1293 		  tree new_arg = redirect_edge_var_map_def (vm);
1294 
1295 		  if (def == old_arg)
1296 		    {
1297 		      def = new_arg;
1298 		      locus = redirect_edge_var_map_location (vm);
1299 		      break;
1300 		    }
1301 		}
1302 	    }
1303 
1304 	  add_phi_arg (phi, def, s, locus);
1305 	}
1306 
1307       redirect_edge_var_map_clear (e);
1308     }
1309 
1310   /* Move debug statements.  Reset them if the destination does not
1311      have a single predecessor.  */
1312   move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);
1313 
1314   /* Update the dominators.  */
1315   dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
1316   domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
1317   if (domdest == bb)
1318     {
1319       /* Shortcut to avoid calling (relatively expensive)
1320 	 nearest_common_dominator unless necessary.  */
1321       dom = dombb;
1322     }
1323   else
1324     dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
1325 
1326   set_immediate_dominator (CDI_DOMINATORS, dest, dom);
1327 
1328   /* Adjust latch information of BB's parent loop as otherwise
1329      the cfg hook has a hard time not to kill the loop.  */
1330   if (current_loops && bb->loop_father->latch == bb)
1331     bb->loop_father->latch = pred;
1332 
1333   /* Remove BB since all of BB's incoming edges have been redirected
1334      to DEST.  */
1335   delete_basic_block (bb);
1336 
1337   return true;
1338 }
1339 
1340 /* This pass merges PHI nodes if one feeds into another.  For example,
1341    suppose we have the following:
1342 
1343   goto <bb 9> (<L9>);
1344 
1345 <L8>:;
1346   tem_17 = foo ();
1347 
1348   # tem_6 = PHI <tem_17(8), tem_23(7)>;
1349 <L9>:;
1350 
1351   # tem_3 = PHI <tem_6(9), tem_2(5)>;
1352 <L10>:;
1353 
1354   Then we merge the first PHI node into the second one like so:
1355 
1356   goto <bb 9> (<L10>);
1357 
1358 <L8>:;
1359   tem_17 = foo ();
1360 
1361   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
1362 <L10>:;
1363 */
1364 
1365 namespace {
1366 
1367 const pass_data pass_data_merge_phi =
1368 {
1369   GIMPLE_PASS, /* type */
1370   "mergephi", /* name */
1371   OPTGROUP_NONE, /* optinfo_flags */
1372   TV_TREE_MERGE_PHI, /* tv_id */
1373   ( PROP_cfg | PROP_ssa ), /* properties_required */
1374   0, /* properties_provided */
1375   0, /* properties_destroyed */
1376   0, /* todo_flags_start */
1377   0, /* todo_flags_finish */
1378 };
1379 
1380 class pass_merge_phi : public gimple_opt_pass
1381 {
1382 public:
1383   pass_merge_phi (gcc::context *ctxt)
1384     : gimple_opt_pass (pass_data_merge_phi, ctxt)
1385   {}
1386 
1387   /* opt_pass methods: */
1388   opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
1389   virtual unsigned int execute (function *);
1390 
1391 }; // class pass_merge_phi
1392 
1393 unsigned int
1394 pass_merge_phi::execute (function *fun)
1395 {
1396   basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
1397   basic_block *current = worklist;
1398   basic_block bb;
1399 
1400   calculate_dominance_info (CDI_DOMINATORS);
1401 
1402   /* Find all PHI nodes that we may be able to merge.  */
1403   FOR_EACH_BB_FN (bb, fun)
1404     {
1405       basic_block dest;
1406 
1407       /* Look for a forwarder block with PHI nodes.  */
1408       if (!tree_forwarder_block_p (bb, true))
1409 	continue;
1410 
1411       dest = single_succ (bb);
1412 
1413       /* We have to feed into another basic block with PHI
1414 	 nodes.  */
1415       if (gimple_seq_empty_p (phi_nodes (dest))
1416 	  /* We don't want to deal with a basic block with
1417 	     abnormal edges.  */
1418 	  || bb_has_abnormal_pred (bb))
1419 	continue;
1420 
1421       if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
1422 	{
1423 	  /* If BB does not dominate DEST, then the PHI nodes at
1424 	     DEST must be the only users of the results of the PHI
1425 	     nodes at BB.  */
1426 	  *current++ = bb;
1427 	}
1428       else
1429 	{
1430 	  gphi_iterator gsi;
1431 	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
1432 
1433 	  /* BB dominates DEST.  There may be many users of the PHI
1434 	     nodes in BB.  However, there is still a trivial case we
1435 	     can handle.  If the result of every PHI in BB is used
1436 	     only by a PHI in DEST, then we can trivially merge the
1437 	     PHI nodes from BB into DEST.  */
1438 	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1439 	       gsi_next (&gsi))
1440 	    {
1441 	      gphi *phi = gsi.phi ();
1442 	      tree result = gimple_phi_result (phi);
1443 	      use_operand_p imm_use;
1444 	      gimple *use_stmt;
1445 
1446 	      /* If the PHI's result is never used, then we can just
1447 		 ignore it.  */
1448 	      if (has_zero_uses (result))
1449 		continue;
1450 
1451 	      /* Get the single use of the result of this PHI node.  */
1452   	      if (!single_imm_use (result, &imm_use, &use_stmt)
1453 		  || gimple_code (use_stmt) != GIMPLE_PHI
1454 		  || gimple_bb (use_stmt) != dest
1455 		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
1456 		break;
1457 	    }
1458 
1459 	  /* If the loop above iterated through all the PHI nodes
1460 	     in BB, then we can merge the PHIs from BB into DEST.  */
1461 	  if (gsi_end_p (gsi))
1462 	    *current++ = bb;
1463 	}
1464     }
1465 
1466   /* Now let's drain WORKLIST.  */
1467   bool changed = false;
1468   while (current != worklist)
1469     {
1470       bb = *--current;
1471       changed |= remove_forwarder_block_with_phi (bb);
1472     }
1473   free (worklist);
1474 
1475   /* Removing forwarder blocks can cause formerly irreducible loops
1476      to become reducible if we merged two entry blocks.  */
1477   if (changed
1478       && current_loops)
1479     loops_state_set (LOOPS_NEED_FIXUP);
1480 
1481   return 0;
1482 }
1483 
1484 } // anon namespace
1485 
1486 gimple_opt_pass *
1487 make_pass_merge_phi (gcc::context *ctxt)
1488 {
1489   return new pass_merge_phi (ctxt);
1490 }
1491 
1492 /* Pass: cleanup the CFG just before expanding trees to RTL.
1493    This is just a round of label cleanups and case node grouping
1494    because after the tree optimizers have run such cleanups may
1495    be necessary.  */
1496 
1497 static unsigned int
1498 execute_cleanup_cfg_post_optimizing (void)
1499 {
1500   unsigned int todo = execute_fixup_cfg ();
1501   if (cleanup_tree_cfg ())
1502     {
1503       todo &= ~TODO_cleanup_cfg;
1504       todo |= TODO_update_ssa;
1505     }
1506   maybe_remove_unreachable_handlers ();
1507   cleanup_dead_labels ();
1508   if (group_case_labels ())
1509     todo |= TODO_cleanup_cfg;
1510   if ((flag_compare_debug_opt || flag_compare_debug)
1511       && flag_dump_final_insns)
1512     {
1513       FILE *final_output = fopen (flag_dump_final_insns, "a");
1514 
1515       if (!final_output)
1516 	{
1517 	  error ("could not open final insn dump file %qs: %m",
1518 		 flag_dump_final_insns);
1519 	  flag_dump_final_insns = NULL;
1520 	}
1521       else
1522 	{
1523 	  int save_unnumbered = flag_dump_unnumbered;
1524 	  int save_noaddr = flag_dump_noaddr;
1525 
1526 	  flag_dump_noaddr = flag_dump_unnumbered = 1;
1527 	  fprintf (final_output, "\n");
1528 	  dump_enumerated_decls (final_output,
1529 				 dump_flags | TDF_SLIM | TDF_NOUID);
1530 	  flag_dump_noaddr = save_noaddr;
1531 	  flag_dump_unnumbered = save_unnumbered;
1532 	  if (fclose (final_output))
1533 	    {
1534 	      error ("could not close final insn dump file %qs: %m",
1535 		     flag_dump_final_insns);
1536 	      flag_dump_final_insns = NULL;
1537 	    }
1538 	}
1539     }
1540   return todo;
1541 }
1542 
1543 namespace {
1544 
1545 const pass_data pass_data_cleanup_cfg_post_optimizing =
1546 {
1547   GIMPLE_PASS, /* type */
1548   "optimized", /* name */
1549   OPTGROUP_NONE, /* optinfo_flags */
1550   TV_TREE_CLEANUP_CFG, /* tv_id */
1551   PROP_cfg, /* properties_required */
1552   0, /* properties_provided */
1553   0, /* properties_destroyed */
1554   0, /* todo_flags_start */
1555   TODO_remove_unused_locals, /* todo_flags_finish */
1556 };
1557 
1558 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1559 {
1560 public:
1561   pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1562     : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1563   {}
1564 
1565   /* opt_pass methods: */
1566   virtual unsigned int execute (function *)
1567     {
1568       return execute_cleanup_cfg_post_optimizing ();
1569     }
1570 
1571 }; // class pass_cleanup_cfg_post_optimizing
1572 
1573 } // anon namespace
1574 
1575 gimple_opt_pass *
1576 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1577 {
1578   return new pass_cleanup_cfg_post_optimizing (ctxt);
1579 }
1580 
1581 
1582 /* Delete all unreachable basic blocks and update callgraph.
1583    Doing so is somewhat nontrivial because we need to update all clones and
1584    remove inline functions that become unreachable.  */
1585 
1586 bool
1587 delete_unreachable_blocks_update_callgraph (cgraph_node *dst_node,
1588 					    bool update_clones)
1589 {
1590   bool changed = false;
1591   basic_block b, next_bb;
1592 
1593   find_unreachable_blocks ();
1594 
1595   /* Delete all unreachable basic blocks.  */
1596 
1597   for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
1598        != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
1599     {
1600       next_bb = b->next_bb;
1601 
1602       if (!(b->flags & BB_REACHABLE))
1603 	{
1604           gimple_stmt_iterator bsi;
1605 
1606           for (bsi = gsi_start_bb (b); !gsi_end_p (bsi); gsi_next (&bsi))
1607 	    {
1608 	      struct cgraph_edge *e;
1609 	      struct cgraph_node *node;
1610 
1611 	      dst_node->remove_stmt_references (gsi_stmt (bsi));
1612 
1613 	      if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1614 		  &&(e = dst_node->get_edge (gsi_stmt (bsi))) != NULL)
1615 		{
1616 		  if (!e->inline_failed)
1617 		    e->callee->remove_symbol_and_inline_clones (dst_node);
1618 		  else
1619 		    cgraph_edge::remove (e);
1620 		}
1621 	      if (update_clones && dst_node->clones)
1622 		for (node = dst_node->clones; node != dst_node;)
1623 		  {
1624 		    node->remove_stmt_references (gsi_stmt (bsi));
1625 		    if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
1626 			&& (e = node->get_edge (gsi_stmt (bsi))) != NULL)
1627 		      {
1628 			if (!e->inline_failed)
1629 			  e->callee->remove_symbol_and_inline_clones (dst_node);
1630 			else
1631 			  cgraph_edge::remove (e);
1632 		      }
1633 
1634 		    if (node->clones)
1635 		      node = node->clones;
1636 		    else if (node->next_sibling_clone)
1637 		      node = node->next_sibling_clone;
1638 		    else
1639 		      {
1640 			while (node != dst_node && !node->next_sibling_clone)
1641 			  node = node->clone_of;
1642 			if (node != dst_node)
1643 			  node = node->next_sibling_clone;
1644 		      }
1645 		  }
1646 	    }
1647 	  delete_basic_block (b);
1648 	  changed = true;
1649 	}
1650     }
1651 
1652   return changed;
1653 }
1654 
1655