1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3    1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
4    Free Software Foundation, Inc.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* This file contains the control flow optimizer.  The main entry point is
23    cleanup_cfg.  The following optimizations are performed:
24 
25    - Unreachable block removal
26    - Edge forwarding (an edge to a forwarder block is forwarded to its
27      successor.  Simplification of the branch instruction is performed by
28      the underlying infrastructure, so the branch can be converted to a
29      simplejump or eliminated).
30    - Cross jumping (tail merging)
31    - Conditional jump-around-simplejump simplification
32    - Basic block merging.  */
33 
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
58 #include "dbgcnt.h"
59 
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
61 
62 /* Set to true when running the first pass of the try_optimize_cfg loop.  */
63 static bool first_pass;
64 
65 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
66 static bool crossjumps_occured;
67 
68 static bool try_crossjump_to_edge (int, edge, edge);
69 static bool try_crossjump_bb (int, basic_block);
70 static bool outgoing_edges_match (int, basic_block, basic_block);
71 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
72 static bool old_insns_match_p (int, rtx, rtx);
73 
74 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
75 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
76 static bool try_optimize_cfg (int);
77 static bool try_simplify_condjump (basic_block);
78 static bool try_forward_edges (int, basic_block);
79 static edge thread_jump (edge, basic_block);
80 static bool mark_effect (rtx, bitmap);
81 static void notice_new_block (basic_block);
82 static void update_forwarder_flag (basic_block);
83 static int mentions_nonequal_regs (rtx *, void *);
84 static void merge_memattrs (rtx, rtx);
85 
86 /* Set flags for a newly created block.  */
87 
88 static void
89 notice_new_block (basic_block bb)
90 {
91   if (!bb)
92     return;
93 
94   if (forwarder_block_p (bb))
95     bb->flags |= BB_FORWARDER_BLOCK;
96 }
97 
98 /* Recompute the forwarder flag after a block has been modified.  */
99 
100 static void
101 update_forwarder_flag (basic_block bb)
102 {
103   if (forwarder_block_p (bb))
104     bb->flags |= BB_FORWARDER_BLOCK;
105   else
106     bb->flags &= ~BB_FORWARDER_BLOCK;
107 }
108 
109 /* Simplify a conditional jump around an unconditional jump.
110    Return true if something changed.  */
111 
112 static bool
113 try_simplify_condjump (basic_block cbranch_block)
114 {
115   basic_block jump_block, jump_dest_block, cbranch_dest_block;
116   edge cbranch_jump_edge, cbranch_fallthru_edge;
117   rtx cbranch_insn;
118 
119   /* Verify that there are exactly two successors.  */
120   if (EDGE_COUNT (cbranch_block->succs) != 2)
121     return false;
122 
123   /* Verify that we've got a normal conditional branch at the end
124      of the block.  */
125   cbranch_insn = BB_END (cbranch_block);
126   if (!any_condjump_p (cbranch_insn))
127     return false;
128 
129   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131 
132   /* The next block must not have multiple predecessors, must not
133      be the last block in the function, and must contain just the
134      unconditional jump.  */
135   jump_block = cbranch_fallthru_edge->dest;
136   if (!single_pred_p (jump_block)
137       || jump_block->next_bb == EXIT_BLOCK_PTR
138       || !FORWARDER_BLOCK_P (jump_block))
139     return false;
140   jump_dest_block = single_succ (jump_block);
141 
142   /* If we are partitioning hot/cold basic blocks, we don't want to
143      mess up unconditional or indirect jumps that cross between hot
144      and cold sections.
145 
146      Basic block partitioning may result in some jumps that appear to
147      be optimizable (or blocks that appear to be mergeable), but which really
148      must be left untouched (they are required to make it safely across
149      partition boundaries).  See the comments at the top of
150      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
151 
152   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153       || (cbranch_jump_edge->flags & EDGE_CROSSING))
154     return false;
155 
156   /* The conditional branch must target the block after the
157      unconditional branch.  */
158   cbranch_dest_block = cbranch_jump_edge->dest;
159 
160   if (cbranch_dest_block == EXIT_BLOCK_PTR
161       || !can_fallthru (jump_block, cbranch_dest_block))
162     return false;
163 
164   /* Invert the conditional branch.  */
165   if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
166     return false;
167 
168   if (dump_file)
169     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
170 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
171 
172   /* Success.  Update the CFG to match.  Note that after this point
173      the edge variable names appear backwards; the redirection is done
174      this way to preserve edge profile data.  */
175   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
176 						cbranch_dest_block);
177   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
178 						    jump_dest_block);
179   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
180   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
181   update_br_prob_note (cbranch_block);
182 
183   /* Delete the block with the unconditional jump, and clean up the mess.  */
184   delete_basic_block (jump_block);
185   tidy_fallthru_edge (cbranch_jump_edge);
186   update_forwarder_flag (cbranch_block);
187 
188   return true;
189 }
190 
191 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
192    effect on the register.  Used by jump threading.  */
193 
194 static bool
195 mark_effect (rtx exp, regset nonequal)
196 {
197   int regno;
198   rtx dest;
199   switch (GET_CODE (exp))
200     {
201       /* In case we do clobber the register, mark it as equal, as we know the
202 	 value is dead so it doesn't have to match.  */
203     case CLOBBER:
204       if (REG_P (XEXP (exp, 0)))
205 	{
206 	  dest = XEXP (exp, 0);
207 	  regno = REGNO (dest);
208 	  CLEAR_REGNO_REG_SET (nonequal, regno);
209 	  if (regno < FIRST_PSEUDO_REGISTER)
210 	    {
211 	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
212 	      while (--n > 0)
213 		CLEAR_REGNO_REG_SET (nonequal, regno + n);
214 	    }
215 	}
216       return false;
217 
218     case SET:
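      /* If CSElib can prove that the SET is a no-op, the destination keeps
         a value equal to the one in the former block.  Otherwise record the
         destination register (and all hard registers it spans) as not
         provably equal.  */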
219       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
220 	return false;
221       dest = SET_DEST (exp);
222       if (dest == pc_rtx)
223 	return false;
224       if (!REG_P (dest))
225 	return true;
226       regno = REGNO (dest);
227       SET_REGNO_REG_SET (nonequal, regno);
228       if (regno < FIRST_PSEUDO_REGISTER)
229 	{
230 	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
231 	  while (--n > 0)
232 	    SET_REGNO_REG_SET (nonequal, regno + n);
233 	}
234       return false;
235 
236     default:
237       return false;
238     }
239 }
240 
241 /* Return nonzero if X is a register set in regset DATA.
242    Called via for_each_rtx.  */
243 static int
244 mentions_nonequal_regs (rtx *x, void *data)
245 {
246   regset nonequal = (regset) data;
247   if (REG_P (*x))
248     {
249       int regno;
250 
251       regno = REGNO (*x);
252       if (REGNO_REG_SET_P (nonequal, regno))
253 	return 1;
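      /* For a hard register, also check every hard register spanned by the
         value in this mode.  */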
254       if (regno < FIRST_PSEUDO_REGISTER)
255 	{
256 	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
257 	  while (--n > 0)
258 	    if (REGNO_REG_SET_P (nonequal, regno + n))
259 	      return 1;
260 	}
261     }
262   return 0;
263 }
264 /* Attempt to prove that the basic block B will have no side effects and
265    always continues along the same edge if reached via E.  Return that edge
266    if it exists, NULL otherwise.  */
267 
268 static edge
269 thread_jump (edge e, basic_block b)
270 {
271   rtx set1, set2, cond1, cond2, insn;
272   enum rtx_code code1, code2, reversed_code2;
273   bool reverse1 = false;
274   unsigned i;
275   regset nonequal;
276   bool failed = false;
277   reg_set_iterator rsi;
278 
279   if (b->flags & BB_NONTHREADABLE_BLOCK)
280     return NULL;
281 
282   /* At the moment we handle only conditional jumps, but later we may
283      want to extend this code to tablejumps and others.  */
284   if (EDGE_COUNT (e->src->succs) != 2)
285     return NULL;
286   if (EDGE_COUNT (b->succs) != 2)
287     {
288       b->flags |= BB_NONTHREADABLE_BLOCK;
289       return NULL;
290     }
291 
292   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
293   if (!any_condjump_p (BB_END (e->src)))
294     return NULL;
295 
296   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
297     {
298       b->flags |= BB_NONTHREADABLE_BLOCK;
299       return NULL;
300     }
301 
302   set1 = pc_set (BB_END (e->src));
303   set2 = pc_set (BB_END (b));
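  /* Determine whether the condition of the first jump, as written, is the
     one that holds when control flows along E; if not, we will use the
     reversed comparison code below.  */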
304   if (((e->flags & EDGE_FALLTHRU) != 0)
305       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
306     reverse1 = true;
307 
308   cond1 = XEXP (SET_SRC (set1), 0);
309   cond2 = XEXP (SET_SRC (set2), 0);
310   if (reverse1)
311     code1 = reversed_comparison_code (cond1, BB_END (e->src));
312   else
313     code1 = GET_CODE (cond1);
314 
315   code2 = GET_CODE (cond2);
316   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
317 
318   if (!comparison_dominates_p (code1, code2)
319       && !comparison_dominates_p (code1, reversed_code2))
320     return NULL;
321 
322   /* Ensure that the comparison operators are equivalent.
323      ??? This is far too pessimistic.  We should allow swapped operands,
324      different CCmodes, or for example comparisons for interval, that
325      dominate even when operands are not equivalent.  */
326   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
327       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
328     return NULL;
329 
330   /* Short circuit cases where block B contains some side effects, as we can't
331      safely bypass it.  */
332   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
333        insn = NEXT_INSN (insn))
334     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
335       {
336 	b->flags |= BB_NONTHREADABLE_BLOCK;
337 	return NULL;
338       }
339 
340   cselib_init (0);
341 
342   /* First process all values computed in the source basic block.  */
343   for (insn = NEXT_INSN (BB_HEAD (e->src));
344        insn != NEXT_INSN (BB_END (e->src));
345        insn = NEXT_INSN (insn))
346     if (INSN_P (insn))
347       cselib_process_insn (insn);
348 
349   nonequal = BITMAP_ALLOC (NULL);
350   CLEAR_REG_SET (nonequal);
351 
352   /* Now assume that we've continued along the edge E to B and continue
353      processing as if it were the same basic block.
354      Our goal is to prove that the whole block is a NOOP.  */
355 
356   for (insn = NEXT_INSN (BB_HEAD (b));
357        insn != NEXT_INSN (BB_END (b)) && !failed;
358        insn = NEXT_INSN (insn))
359     {
360       if (INSN_P (insn))
361 	{
362 	  rtx pat = PATTERN (insn);
363 
364 	  if (GET_CODE (pat) == PARALLEL)
365 	    {
366 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
367 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
368 	    }
369 	  else
370 	    failed |= mark_effect (pat, nonequal);
371 	}
372 
373       cselib_process_insn (insn);
374     }
375 
376   /* Later we should clear nonequal of dead registers.  So far we don't
377      have life information in cfg_cleanup.  */
378   if (failed)
379     {
380       b->flags |= BB_NONTHREADABLE_BLOCK;
381       goto failed_exit;
382     }
383 
384   /* cond2 must not mention any register whose value is not known to be
385      equal to its value in the former block.  */
386   if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
387     goto failed_exit;
388 
389   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
390     goto failed_exit;
391 
392   BITMAP_FREE (nonequal);
393   cselib_finish ();
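  /* We have proved that the condition of the second jump has a known
     outcome when B is entered via E; return the outgoing edge of B that is
     taken in that case.  */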
394   if ((comparison_dominates_p (code1, code2) != 0)
395       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
396     return BRANCH_EDGE (b);
397   else
398     return FALLTHRU_EDGE (b);
399 
400 failed_exit:
401   BITMAP_FREE (nonequal);
402   cselib_finish ();
403   return NULL;
404 }
405 
406 /* Attempt to forward edges leaving basic block B.
407    Return true if successful.  */
408 
409 static bool
410 try_forward_edges (int mode, basic_block b)
411 {
412   bool changed = false;
413   edge_iterator ei;
414   edge e, *threaded_edges = NULL;
415 
416   /* If we are partitioning hot/cold basic blocks, we don't want to
417      mess up unconditional or indirect jumps that cross between hot
418      and cold sections.
419 
420      Basic block partitioning may result in some jumps that appear to
421      be optimizable (or blocks that appear to be mergeable), but which really
422      must be left untouched (they are required to make it safely across
423      partition boundaries).  See the comments at the top of
424      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
425 
426   if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
427     return false;
428 
429   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
430     {
431       basic_block target, first;
432       int counter, goto_locus;
433       bool threaded = false;
434       int nthreaded_edges = 0;
435       bool may_thread = first_pass | df_get_bb_dirty (b);
436 
437       /* Skip complex edges because we don't know how to update them.
438 
439 	 Still handle fallthru edges, as we can succeed in forwarding the
440 	 fallthru edge to the same place as the branch edge of a conditional
441 	 branch and turn the conditional branch into an unconditional one.  */
442       if (e->flags & EDGE_COMPLEX)
443 	{
444 	  ei_next (&ei);
445 	  continue;
446 	}
447 
448       target = first = e->dest;
449       counter = NUM_FIXED_BLOCKS;
450       goto_locus = e->goto_locus;
451 
452       /* If we are partitioning hot/cold basic_blocks, we don't want to mess
453 	 up jumps that cross between hot/cold sections.
454 
455 	 Basic block partitioning may result in some jumps that appear
456 	 to be optimizable (or blocks that appear to be mergeable), but which
457 	 really must be left untouched (they are required to make it safely
458 	 across partition boundaries).  See the comments at the top of
459 	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
460 	 details.  */
461 
462       if (first != EXIT_BLOCK_PTR
463 	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
464 	return false;
465 
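      /* Follow the chain of forwarder blocks (and threaded jumps) starting
         at E's destination.  COUNTER guards against cycles: once it reaches
         n_basic_blocks we know we are looping and give up.  */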
466       while (counter < n_basic_blocks)
467 	{
468 	  basic_block new_target = NULL;
469 	  bool new_target_threaded = false;
470 	  may_thread |= df_get_bb_dirty (target);
471 
472 	  if (FORWARDER_BLOCK_P (target)
473 	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
474 	      && single_succ (target) != EXIT_BLOCK_PTR)
475 	    {
476 	      /* Bypass trivial infinite loops.  */
477 	      new_target = single_succ (target);
478 	      if (target == new_target)
479 		counter = n_basic_blocks;
480 	      else if (!optimize)
481 		{
482 		  /* When not optimizing, ensure that edges or forwarder
483 		     blocks with different locus are not optimized out.  */
484 		  int locus = single_succ_edge (target)->goto_locus;
485 		  rtx last;
486 
487 		  if (locus && goto_locus && !locator_eq (locus, goto_locus))
488 		    counter = n_basic_blocks;
489 		  else if (locus)
490 		    goto_locus = locus;
491 
492 		  last = BB_END (target);
493 		  if (DEBUG_INSN_P (last))
494 		    last = prev_nondebug_insn (last);
495 
496 		  if (last && INSN_P (last))
497 		    {
498 		      locus = INSN_LOCATOR (last);
499 
500 		      if (locus && goto_locus
501 			  && !locator_eq (locus, goto_locus))
502 			counter = n_basic_blocks;
503 		      else if (locus)
504 			goto_locus = locus;
505 		    }
506 		}
507 	    }
508 
509 	  /* Allow threading over only one edge at a time to simplify updating
510 	     of probabilities.  */
511 	  else if ((mode & CLEANUP_THREADING) && may_thread)
512 	    {
513 	      edge t = thread_jump (e, target);
514 	      if (t)
515 		{
516 		  if (!threaded_edges)
517 		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
518 		  else
519 		    {
520 		      int i;
521 
522 		      /* Detect an infinite loop across blocks not
523 			 including the start block.  */
524 		      for (i = 0; i < nthreaded_edges; ++i)
525 			if (threaded_edges[i] == t)
526 			  break;
527 		      if (i < nthreaded_edges)
528 			{
529 			  counter = n_basic_blocks;
530 			  break;
531 			}
532 		    }
533 
534 		  /* Detect an infinite loop across the start block.  */
535 		  if (t->dest == b)
536 		    break;
537 
538 		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
539 		  threaded_edges[nthreaded_edges++] = t;
540 
541 		  new_target = t->dest;
542 		  new_target_threaded = true;
543 		}
544 	    }
545 
546 	  if (!new_target)
547 	    break;
548 
549 	  counter++;
550 	  target = new_target;
551 	  threaded |= new_target_threaded;
552 	}
553 
554       if (counter >= n_basic_blocks)
555 	{
556 	  if (dump_file)
557 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
558 		     target->index);
559 	}
560       else if (target == first)
561 	; /* We didn't do anything.  */
562       else
563 	{
564 	  /* Save the values now, as the edge may get removed.  */
565 	  gcov_type edge_count = e->count;
566 	  int edge_probability = e->probability;
567 	  int edge_frequency;
568 	  int n = 0;
569 
570 	  e->goto_locus = goto_locus;
571 
572 	  /* Don't force if target is exit block.  */
573 	  if (threaded && target != EXIT_BLOCK_PTR)
574 	    {
575 	      notice_new_block (redirect_edge_and_branch_force (e, target));
576 	      if (dump_file)
577 		fprintf (dump_file, "Conditionals threaded.\n");
578 	    }
579 	  else if (!redirect_edge_and_branch (e, target))
580 	    {
581 	      if (dump_file)
582 		fprintf (dump_file,
583 			 "Forwarding edge %i->%i to %i failed.\n",
584 			 b->index, e->dest->index, target->index);
585 	      ei_next (&ei);
586 	      continue;
587 	    }
588 
589 	  /* We successfully forwarded the edge.  Now update profile
590 	     data: for each edge we traversed in the chain, remove
591 	     the original edge's execution count.  */
592 	  edge_frequency = ((edge_probability * b->frequency
593 			     + REG_BR_PROB_BASE / 2)
594 			    / REG_BR_PROB_BASE);
595 
596 	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
597 	    b->flags |= BB_FORWARDER_BLOCK;
598 
599 	  do
600 	    {
601 	      edge t;
602 
603 	      if (!single_succ_p (first))
604 		{
605 		  gcc_assert (n < nthreaded_edges);
606 		  t = threaded_edges [n++];
607 		  gcc_assert (t->src == first);
608 		  update_bb_profile_for_threading (first, edge_frequency,
609 						   edge_count, t);
610 		  update_br_prob_note (first);
611 		}
612 	      else
613 		{
614 		  first->count -= edge_count;
615 		  if (first->count < 0)
616 		    first->count = 0;
617 		  first->frequency -= edge_frequency;
618 		  if (first->frequency < 0)
619 		    first->frequency = 0;
620 		  /* It is possible that as a result of
621 		     threading we've removed the edge, as it is
622 		     threaded to the fallthru edge.  Avoid
623 		     getting out of sync.  */
624 		  if (n < nthreaded_edges
625 		      && first == threaded_edges [n]->src)
626 		    n++;
627 		  t = single_succ_edge (first);
628 		}
629 
630 	      t->count -= edge_count;
631 	      if (t->count < 0)
632 		t->count = 0;
633 	      first = t->dest;
634 	    }
635 	  while (first != target);
636 
637 	  changed = true;
638 	  continue;
639 	}
640       ei_next (&ei);
641     }
642 
643   if (threaded_edges)
644     free (threaded_edges);
645   return changed;
646 }
647 
648 
649 /* Blocks A and B are to be merged into a single block.  A has no incoming
650    fallthru edge, so it can be moved before B without adding or modifying
651    any jumps (aside from the jump from A to B).  */
652 
653 static void
654 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
655 {
656   rtx barrier;
657 
658   /* If we are partitioning hot/cold basic blocks, we don't want to
659      mess up unconditional or indirect jumps that cross between hot
660      and cold sections.
661 
662      Basic block partitioning may result in some jumps that appear to
663      be optimizable (or blocks that appear to be mergeable), but which really
664      must be left untouched (they are required to make it safely across
665      partition boundaries).  See the comments at the top of
666      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
667 
668   if (BB_PARTITION (a) != BB_PARTITION (b))
669     return;
670 
671   barrier = next_nonnote_insn (BB_END (a));
672   gcc_assert (BARRIER_P (barrier));
673   delete_insn (barrier);
674 
675   /* Scramble the insn chain.  */
676   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
677     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
678   df_set_bb_dirty (a);
679 
680   if (dump_file)
681     fprintf (dump_file, "Moved block %d before %d and merged.\n",
682 	     a->index, b->index);
683 
684   /* Swap the records for the two blocks around.  */
685 
686   unlink_block (a);
687   link_block (a, b->prev_bb);
688 
689   /* Now blocks A and B are contiguous.  Merge them.  */
690   merge_blocks (a, b);
691 }
692 
693 /* Blocks A and B are to be merged into a single block.  B has no outgoing
694    fallthru edge, so it can be moved after A without adding or modifying
695    any jumps (aside from the jump from A to B).  */
696 
697 static void
698 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
699 {
700   rtx barrier, real_b_end;
701   rtx label, table;
702 
703   /* If we are partitioning hot/cold basic blocks, we don't want to
704      mess up unconditional or indirect jumps that cross between hot
705      and cold sections.
706 
707      Basic block partitioning may result in some jumps that appear to
708      be optimizable (or blocks that appear to be mergeable), but which really
709      must be left untouched (they are required to make it safely across
710      partition boundaries).  See the comments at the top of
711      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
712 
713   if (BB_PARTITION (a) != BB_PARTITION (b))
714     return;
715 
716   real_b_end = BB_END (b);
717 
718   /* If there is a jump table following block B temporarily add the jump table
719      to block B so that it will also be moved to the correct location.  */
720   if (tablejump_p (BB_END (b), &label, &table)
721       && prev_active_insn (label) == BB_END (b))
722     {
723       BB_END (b) = table;
724     }
725 
726   /* There had better have been a barrier there.  Delete it.  */
727   barrier = NEXT_INSN (BB_END (b));
728   if (barrier && BARRIER_P (barrier))
729     delete_insn (barrier);
730 
731 
732   /* Scramble the insn chain.  */
733   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
734 
735   /* Restore the real end of b.  */
736   BB_END (b) = real_b_end;
737 
738   if (dump_file)
739     fprintf (dump_file, "Moved block %d after %d and merged.\n",
740 	     b->index, a->index);
741 
742   /* Now blocks A and B are contiguous.  Merge them.  */
743   merge_blocks (a, b);
744 }
745 
746 /* Attempt to merge basic blocks that are potentially non-adjacent.
747    Return NULL iff the attempt failed, otherwise return the basic block
748    where cleanup_cfg should continue.  Because the merging commonly
749    moves a basic block away or introduces another optimization
750    possibility, return the basic block just before B so cleanup_cfg
751    doesn't need to iterate.
752 
753    It may be a good idea to return the basic block before C in the case
754    C has been moved after B and originally appeared earlier in the
755    insn sequence, but we have no information available about the
756    relative ordering of these two.  Hopefully it is not too common.  */
757 
758 static basic_block
759 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
760 {
761   basic_block next;
762 
763   /* If we are partitioning hot/cold basic blocks, we don't want to
764      mess up unconditional or indirect jumps that cross between hot
765      and cold sections.
766 
767      Basic block partitioning may result in some jumps that appear to
768      be optimizable (or blocks that appear to be mergeable), but which really
769      must be left untouched (they are required to make it safely across
770      partition boundaries).  See the comments at the top of
771      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
772 
773   if (BB_PARTITION (b) != BB_PARTITION (c))
774     return NULL;
775 
776   /* If B has a fallthru edge to C, no need to move anything.  */
777   if (e->flags & EDGE_FALLTHRU)
778     {
779       int b_index = b->index, c_index = c->index;
780       merge_blocks (b, c);
781       update_forwarder_flag (b);
782 
783       if (dump_file)
784 	fprintf (dump_file, "Merged %d and %d without moving.\n",
785 		 b_index, c_index);
786 
787       return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
788     }
789 
790   /* Otherwise we will need to move code around.  Do that only if expensive
791      transformations are allowed.  */
792   else if (mode & CLEANUP_EXPENSIVE)
793     {
794       edge tmp_edge, b_fallthru_edge;
795       bool c_has_outgoing_fallthru;
796       bool b_has_incoming_fallthru;
797       edge_iterator ei;
798 
799       /* Avoid overactive code motion, as the forwarder blocks should be
800 	 eliminated by edge redirection instead.  One exception might have
801 	 been if B is a forwarder block and C has no fallthru edge, but
802 	 that should be cleaned up by bb-reorder instead.  */
803       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
804 	return NULL;
805 
806       /* We must make sure to not munge nesting of lexical blocks,
807 	 and loop notes.  This is done by squeezing out all the notes
808 	 and leaving them there to lie.  Not ideal, but functional.  */
809 
810       FOR_EACH_EDGE (tmp_edge, ei, c->succs)
811 	if (tmp_edge->flags & EDGE_FALLTHRU)
812 	  break;
813 
814       c_has_outgoing_fallthru = (tmp_edge != NULL);
815 
816       FOR_EACH_EDGE (tmp_edge, ei, b->preds)
817 	if (tmp_edge->flags & EDGE_FALLTHRU)
818 	  break;
819 
820       b_has_incoming_fallthru = (tmp_edge != NULL);
821       b_fallthru_edge = tmp_edge;
822       next = b->prev_bb;
823       if (next == c)
824 	next = next->prev_bb;
825 
826       /* Otherwise, we're going to try to move C after B.  If C does
827 	 not have an outgoing fallthru, then it can be moved
828 	 immediately after B without introducing or modifying jumps.  */
829       if (! c_has_outgoing_fallthru)
830 	{
831 	  merge_blocks_move_successor_nojumps (b, c);
832 	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
833 	}
834 
835       /* If B does not have an incoming fallthru, then it can be moved
836 	 immediately before C without introducing or modifying jumps.
837 	 C cannot be the first block, so we do not have to worry about
838 	 accessing a non-existent block.  */
839 
840       if (b_has_incoming_fallthru)
841 	{
842 	  basic_block bb;
843 
844 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
845 	    return NULL;
846 	  bb = force_nonfallthru (b_fallthru_edge);
847 	  if (bb)
848 	    notice_new_block (bb);
849 	}
850 
851       merge_blocks_move_predecessor_nojumps (b, c);
852       return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
853     }
854 
855   return NULL;
856 }
857 
858 
859 /* Remove the memory attributes from corresponding MEM expressions in X
860    and Y if they are not equal.  */
861 
862 void
863 merge_memattrs (rtx x, rtx y)
864 {
865   int i;
866   int j;
867   enum rtx_code code;
868   const char *fmt;
869 
870   if (x == y)
871     return;
872   if (x == 0 || y == 0)
873     return;
874 
875   code = GET_CODE (x);
876 
877   if (code != GET_CODE (y))
878     return;
879 
880   if (GET_MODE (x) != GET_MODE (y))
881     return;
882 
883   if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
884     {
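      /* Keep only the attributes X and Y agree on: conflicting alias sets,
         exprs and offsets are dropped, the size is widened to the larger of
         the two, and the alignment is reduced to the smaller.  */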
885       if (! MEM_ATTRS (x))
886 	MEM_ATTRS (y) = 0;
887       else if (! MEM_ATTRS (y))
888 	MEM_ATTRS (x) = 0;
889       else
890 	{
891 	  rtx mem_size;
892 
893 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
894 	    {
895 	      set_mem_alias_set (x, 0);
896 	      set_mem_alias_set (y, 0);
897 	    }
898 
899 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
900 	    {
901 	      set_mem_expr (x, 0);
902 	      set_mem_expr (y, 0);
903 	      set_mem_offset (x, 0);
904 	      set_mem_offset (y, 0);
905 	    }
906 	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
907 	    {
908 	      set_mem_offset (x, 0);
909 	      set_mem_offset (y, 0);
910 	    }
911 
912 	  if (!MEM_SIZE (x))
913 	    mem_size = NULL_RTX;
914 	  else if (!MEM_SIZE (y))
915 	    mem_size = NULL_RTX;
916 	  else
917 	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
918 				     INTVAL (MEM_SIZE (y))));
919 	  set_mem_size (x, mem_size);
920 	  set_mem_size (y, mem_size);
921 
922 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
923 	  set_mem_align (y, MEM_ALIGN (x));
924 	}
925     }
926 
927   fmt = GET_RTX_FORMAT (code);
928   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
929     {
930       switch (fmt[i])
931 	{
932 	case 'E':
933 	  /* Two vectors must have the same length.  */
934 	  if (XVECLEN (x, i) != XVECLEN (y, i))
935 	    return;
936 
937 	  for (j = 0; j < XVECLEN (x, i); j++)
938 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
939 
940 	  break;
941 
942 	case 'e':
943 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
944 	}
945     }
946   return;
947 }
948 
949 
950 /* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */
951 
952 static bool
953 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
954 {
955   rtx p1, p2;
956 
957   /* Verify that I1 and I2 are equivalent.  */
958   if (GET_CODE (i1) != GET_CODE (i2))
959     return false;
960 
961   /* __builtin_unreachable() may lead to empty blocks (ending with
962      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
963   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
964     return true;
965 
966   p1 = PATTERN (i1);
967   p2 = PATTERN (i2);
968 
969   if (GET_CODE (p1) != GET_CODE (p2))
970     return false;
971 
972   /* If this is a CALL_INSN, compare register usage information.
973      If we don't check this on stack register machines, the two
974      CALL_INSNs might be merged leaving reg-stack.c with mismatching
975      numbers of stack registers in the same basic block.
976      If we don't check this on machines with delay slots, a delay slot may
977      be filled that clobbers a parameter expected by the subroutine.
978 
979      ??? We take the simple route for now and assume that if they're
980      equal, they were constructed identically.  */
981 
982   if (CALL_P (i1)
983       && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
984 			CALL_INSN_FUNCTION_USAGE (i2))
985 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
986     return false;
987 
988 #ifdef STACK_REGS
989   /* If cross_jump_death_matters is not 0, the insn's mode
990      indicates whether or not the insn contains any stack-like
991      regs.  */
992 
993   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
994     {
995       /* If register stack conversion has already been done, then
996 	 death notes must also be compared before it is certain that
997 	 the two instruction streams match.  */
998 
999       rtx note;
1000       HARD_REG_SET i1_regset, i2_regset;
1001 
1002       CLEAR_HARD_REG_SET (i1_regset);
1003       CLEAR_HARD_REG_SET (i2_regset);
1004 
1005       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1006 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1007 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1008 
1009       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1010 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1011 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1012 
1013       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1014 	return false;
1015     }
1016 #endif
1017 
1018   if (reload_completed
1019       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1020     return true;
1021 
1022   return false;
1023 }
1024 
1025 /* Look through the insns at the end of BB1 and BB2 and find the longest
1026    sequence of equivalent insns.  Store the first insns of that sequence
1027    in *F1 and *F2 and return the sequence length.
1028 
1029    To simplify callers of this function, if the blocks match exactly,
1030    store the head of the blocks in *F1 and *F2.  */
1031 
1032 static int
1033 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1034 		      basic_block bb2, rtx *f1, rtx *f2)
1035 {
1036   rtx i1, i2, last1, last2, afterlast1, afterlast2;
1037   int ninsns = 0;
1038 
1039   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1040      need to be compared for equivalence, which we'll do below.  */
1041 
1042   i1 = BB_END (bb1);
1043   last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1044   if (onlyjump_p (i1)
1045       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1046     {
1047       last1 = i1;
1048       i1 = PREV_INSN (i1);
1049     }
1050 
1051   i2 = BB_END (bb2);
1052   if (onlyjump_p (i2)
1053       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1054     {
1055       last2 = i2;
1056       /* Count everything except for an unconditional jump as an insn.  */
1057       if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1058 	ninsns++;
1059       i2 = PREV_INSN (i2);
1060     }
1061 
1062   while (true)
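  /* Walk backwards through both blocks in lock step, matching insns until
     we find a mismatch or reach the head of either block.  */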
1063     {
1064       /* Ignore notes.  */
1065       while (!NONDEBUG_INSN_P (i1) && i1 != BB_HEAD (bb1))
1066 	i1 = PREV_INSN (i1);
1067 
1068       while (!NONDEBUG_INSN_P (i2) && i2 != BB_HEAD (bb2))
1069 	i2 = PREV_INSN (i2);
1070 
1071       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1072 	break;
1073 
1074       if (!old_insns_match_p (mode, i1, i2))
1075 	break;
1076 
1077       merge_memattrs (i1, i2);
1078 
1079       /* Don't begin a cross-jump with a NOTE insn.  */
1080       if (INSN_P (i1))
1081 	{
1082 	  /* If the merged insns have different REG_EQUAL notes, then
1083 	     remove them.  */
1084 	  rtx equiv1 = find_reg_equal_equiv_note (i1);
1085 	  rtx equiv2 = find_reg_equal_equiv_note (i2);
1086 
1087 	  if (equiv1 && !equiv2)
1088 	    remove_note (i1, equiv1);
1089 	  else if (!equiv1 && equiv2)
1090 	    remove_note (i2, equiv2);
1091 	  else if (equiv1 && equiv2
1092 		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1093 	    {
1094 	      remove_note (i1, equiv1);
1095 	      remove_note (i2, equiv2);
1096 	    }
1097 
1098 	  afterlast1 = last1, afterlast2 = last2;
1099 	  last1 = i1, last2 = i2;
1100 	  ninsns++;
1101 	}
1102 
1103       i1 = PREV_INSN (i1);
1104       i2 = PREV_INSN (i2);
1105     }
1106 
1107 #ifdef HAVE_cc0
1108   /* Don't allow the insn after a compare to be shared by
1109      cross-jumping unless the compare is also shared.  */
1110   if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1111     last1 = afterlast1, last2 = afterlast2, ninsns--;
1112 #endif
1113 
1114   /* Include preceding notes and labels in the cross-jump.  One,
1115      this may bring us to the head of the blocks as requested above.
1116      Two, it keeps line number notes as matched as possible.  */
1117   if (ninsns)
1118     {
1119       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1120 	last1 = PREV_INSN (last1);
1121 
1122       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1123 	last1 = PREV_INSN (last1);
1124 
1125       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1126 	last2 = PREV_INSN (last2);
1127 
1128       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1129 	last2 = PREV_INSN (last2);
1130 
1131       *f1 = last1;
1132       *f2 = last2;
1133     }
1134 
1135   return ninsns;
1136 }
1137 
1138 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1139    the branch instruction.  This means that if we commonize the control
1140    flow before the end of the basic block, the semantics remain unchanged.
1141 
1142    We may assume that there exists one edge with a common destination.  */
1143 
1144 static bool
1145 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1146 {
1147   int nehedges1 = 0, nehedges2 = 0;
1148   edge fallthru1 = 0, fallthru2 = 0;
1149   edge e1, e2;
1150   edge_iterator ei;
1151 
1152   /* If BB1 has only one successor, we may be looking at either an
1153      unconditional jump, or a fake edge to exit.  */
1154   if (single_succ_p (bb1)
1155       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1156       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1157     return (single_succ_p (bb2)
1158 	    && (single_succ_edge (bb2)->flags
1159 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1160 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1161 
1162   /* Match conditional jumps - this may get tricky when fallthru and branch
1163      edges are crossed.  */
1164   if (EDGE_COUNT (bb1->succs) == 2
1165       && any_condjump_p (BB_END (bb1))
1166       && onlyjump_p (BB_END (bb1)))
1167     {
1168       edge b1, f1, b2, f2;
1169       bool reverse, match;
1170       rtx set1, set2, cond1, cond2;
1171       enum rtx_code code1, code2;
1172 
1173       if (EDGE_COUNT (bb2->succs) != 2
1174 	  || !any_condjump_p (BB_END (bb2))
1175 	  || !onlyjump_p (BB_END (bb2)))
1176 	return false;
1177 
1178       b1 = BRANCH_EDGE (bb1);
1179       b2 = BRANCH_EDGE (bb2);
1180       f1 = FALLTHRU_EDGE (bb1);
1181       f2 = FALLTHRU_EDGE (bb2);
1182 
1183       /* Get around possible forwarders on fallthru edges.  Other cases
1184 	 should be optimized out already.  */
1185       if (FORWARDER_BLOCK_P (f1->dest))
1186 	f1 = single_succ_edge (f1->dest);
1187 
1188       if (FORWARDER_BLOCK_P (f2->dest))
1189 	f2 = single_succ_edge (f2->dest);
1190 
1191       /* To simplify use of this function, return false if there are
1192 	 unneeded forwarder blocks.  These will get eliminated later
1193 	 during cleanup_cfg.  */
1194       if (FORWARDER_BLOCK_P (f1->dest)
1195 	  || FORWARDER_BLOCK_P (f2->dest)
1196 	  || FORWARDER_BLOCK_P (b1->dest)
1197 	  || FORWARDER_BLOCK_P (b2->dest))
1198 	return false;
1199 
1200       if (f1->dest == f2->dest && b1->dest == b2->dest)
1201 	reverse = false;
1202       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1203 	reverse = true;
1204       else
1205 	return false;
1206 
1207       set1 = pc_set (BB_END (bb1));
1208       set2 = pc_set (BB_END (bb2));
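      /* If one jump falls through when its condition is true and the other
         when its condition is false, flip the sense of the comparison.  */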
1209       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1210 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1211 	reverse = !reverse;
1212 
1213       cond1 = XEXP (SET_SRC (set1), 0);
1214       cond2 = XEXP (SET_SRC (set2), 0);
1215       code1 = GET_CODE (cond1);
1216       if (reverse)
1217 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1218       else
1219 	code2 = GET_CODE (cond2);
1220 
1221       if (code2 == UNKNOWN)
1222 	return false;
1223 
1224       /* Verify codes and operands match.  */
1225       match = ((code1 == code2
1226 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1227 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1228 	       || (code1 == swap_condition (code2)
1229 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1230 					      XEXP (cond2, 0))
1231 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1232 					      XEXP (cond2, 1))));
1233 
1234       /* If we return true, we will join the blocks, which means that
1235 	 we will have only one branch prediction bit to work with.  Thus
1236 	 we require the existing branches to have probabilities that are
1237 	 roughly similar.  */
1238       if (match
1239 	  && optimize_bb_for_speed_p (bb1)
1240 	  && optimize_bb_for_speed_p (bb2))
1241 	{
1242 	  int prob2;
1243 
1244 	  if (b1->dest == b2->dest)
1245 	    prob2 = b2->probability;
1246 	  else
1247 	    /* Do not use f2 probability as f2 may be forwarded.  */
1248 	    prob2 = REG_BR_PROB_BASE - b2->probability;
1249 
1250 	  /* Fail if the difference in probabilities is greater than 50%.
1251 	     This rules out two well-predicted branches with opposite
1252 	     outcomes.  */
1253 	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1254 	    {
1255 	      if (dump_file)
1256 		fprintf (dump_file,
1257 			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1258 			 bb1->index, bb2->index, b1->probability, prob2);
1259 
1260 	      return false;
1261 	    }
1262 	}
1263 
1264       if (dump_file && match)
1265 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1266 		 bb1->index, bb2->index);
1267 
1268       return match;
1269     }
1270 
1271   /* Generic case - we are seeing a computed jump, table jump or trapping
1272      instruction.  */
1273 
1274   /* Check whether there are tablejumps at the end of BB1 and BB2.
1275      Return true if they are identical.  */
1276     {
1277       rtx label1, label2;
1278       rtx table1, table2;
1279 
1280       if (tablejump_p (BB_END (bb1), &label1, &table1)
1281 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1282 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1283 	{
1284 	  /* The labels should never be the same rtx.  If they really are the
1285 	     same, the jump tables are the same too, so disable crossjumping
1286 	     of blocks BB1 and BB2: deleting the common insns at the end of
1287 	     BB1 by delete_basic_block () would delete the jump table too.  */
1288 	  /* If LABEL2 is referenced in BB1->END, do not do anything because
1289 	     we would lose information when replacing LABEL1 by LABEL2 and
1290 	     then LABEL2 by LABEL1 in BB1->END.  */
1291 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1292 	    {
1293 	      /* Set IDENTICAL to true when the tables are identical.  */
1294 	      bool identical = false;
1295 	      rtx p1, p2;
1296 
1297 	      p1 = PATTERN (table1);
1298 	      p2 = PATTERN (table2);
1299 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1300 		{
1301 		  identical = true;
1302 		}
1303 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1304 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1305 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1306 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1307 		{
1308 		  int i;
1309 
1310 		  identical = true;
1311 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1312 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1313 		      identical = false;
1314 		}
1315 
1316 	      if (identical)
1317 		{
1318 		  replace_label_data rr;
1319 		  bool match;
1320 
1321 		  /* Temporarily replace references to LABEL1 with LABEL2
1322 		     in BB1->END so that we can compare the instructions.  */
1323 		  rr.r1 = label1;
1324 		  rr.r2 = label2;
1325 		  rr.update_label_nuses = false;
1326 		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1327 
1328 		  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1329 		  if (dump_file && match)
1330 		    fprintf (dump_file,
1331 			     "Tablejumps in bb %i and %i match.\n",
1332 			     bb1->index, bb2->index);
1333 
1334 		  /* Restore the original label in BB1->END because when deleting
1335 		     a block whose end is a tablejump, the tablejump referenced
1336 		     from the instruction is deleted too.  */
1337 		  rr.r1 = label2;
1338 		  rr.r2 = label1;
1339 		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1340 
1341 		  return match;
1342 		}
1343 	    }
1344 	  return false;
1345 	}
1346     }
1347 
1348   /* First ensure that the instructions match.  There may be many outgoing
1349      edges so this test is generally cheaper.  */
1350   if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1351     return false;
1352 
1353   /* Search the outgoing edges, ensure that the counts match, and find
1354      possible fallthru and exception handling edges, since these need more
1355      validation.  */
1356   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1357     return false;
1358 
1359   FOR_EACH_EDGE (e1, ei, bb1->succs)
1360     {
1361       e2 = EDGE_SUCC (bb2, ei.index);
1362 
1363       if (e1->flags & EDGE_EH)
1364 	nehedges1++;
1365 
1366       if (e2->flags & EDGE_EH)
1367 	nehedges2++;
1368 
1369       if (e1->flags & EDGE_FALLTHRU)
1370 	fallthru1 = e1;
1371       if (e2->flags & EDGE_FALLTHRU)
1372 	fallthru2 = e2;
1373     }
1374 
1375   /* If the number of edges of various types does not match, fail.  */
1376   if (nehedges1 != nehedges2
1377       || (fallthru1 != 0) != (fallthru2 != 0))
1378     return false;
1379 
1380   /* fallthru edges must be forwarded to the same destination.  */
1381   if (fallthru1)
1382     {
1383       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1384 			? single_succ (fallthru1->dest): fallthru1->dest);
1385       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1386 			? single_succ (fallthru2->dest): fallthru2->dest);
1387 
1388       if (d1 != d2)
1389 	return false;
1390     }
1391 
1392   /* Ensure the same EH region.  */
1393   {
1394     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1395     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1396 
1397     if (!n1 && n2)
1398       return false;
1399 
1400     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1401       return false;
1402   }
1403 
1404   /* The same checks as in try_crossjump_to_edge.  This is required for
1405      the RTL version of sequence abstraction.  */
1406   FOR_EACH_EDGE (e1, ei, bb2->succs)
1407     {
1408       edge e2;
1409       edge_iterator ei;
1410       basic_block d1 = e1->dest;
1411 
1412       if (FORWARDER_BLOCK_P (d1))
1413         d1 = EDGE_SUCC (d1, 0)->dest;
1414 
1415       FOR_EACH_EDGE (e2, ei, bb1->succs)
1416         {
1417           basic_block d2 = e2->dest;
1418           if (FORWARDER_BLOCK_P (d2))
1419             d2 = EDGE_SUCC (d2, 0)->dest;
1420           if (d1 == d2)
1421             break;
1422         }
1423 
1424       if (!e2)
1425         return false;
1426     }
1427 
1428   return true;
1429 }
1430 
1431 /* Return true if basic block BB has a preserved label.  */
1432 
1433 static bool
1434 block_has_preserve_label (basic_block bb)
1435 {
1436   return (bb
1437           && block_label (bb)
1438           && LABEL_PRESERVE_P (block_label (bb)));
1439 }
1440 
1441 /* E1 and E2 are edges with the same destination block.  Search their
1442    predecessors for common code.  If found, redirect control flow from
1443    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
1444 
1445 static bool
1446 try_crossjump_to_edge (int mode, edge e1, edge e2)
1447 {
1448   int nmatch;
1449   basic_block src1 = e1->src, src2 = e2->src;
1450   basic_block redirect_to, redirect_from, to_remove;
1451   rtx newpos1, newpos2;
1452   edge s;
1453   edge_iterator ei;
1454 
1455   newpos1 = newpos2 = NULL_RTX;
1456 
1457   /* If we have partitioned hot/cold basic blocks, it is a bad idea
1458      to try this optimization.
1459 
1460      Basic block partitioning may result in some jumps that appear to
1461      be optimizable (or blocks that appear to be mergeable), but which really
1462      must be left untouched (they are required to make it safely across
1463      partition boundaries).  See the comments at the top of
1464      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1465 
1466   if (flag_reorder_blocks_and_partition && reload_completed)
1467     return false;
1468 
1469   /* Search backward through forwarder blocks.  We don't need to worry
1470      about multiple entry or chained forwarders, as they will be optimized
1471      away.  We do this to look past the unconditional jump following a
1472      conditional jump that is required due to the current CFG shape.  */
1473   if (single_pred_p (src1)
1474       && FORWARDER_BLOCK_P (src1))
1475     e1 = single_pred_edge (src1), src1 = e1->src;
1476 
1477   if (single_pred_p (src2)
1478       && FORWARDER_BLOCK_P (src2))
1479     e2 = single_pred_edge (src2), src2 = e2->src;
1480 
1481   /* Nothing to do if we reach ENTRY, or a common source block.  */
1482   if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1483     return false;
1484   if (src1 == src2)
1485     return false;
1486 
1487   /* Seeing more than one forwarder block would confuse us later...  */
1488   if (FORWARDER_BLOCK_P (e1->dest)
1489       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1490     return false;
1491 
1492   if (FORWARDER_BLOCK_P (e2->dest)
1493       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1494     return false;
1495 
1496   /* Likewise with dead code (possibly newly created by the other optimizations
1497      of cfg_cleanup).  */
1498   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1499     return false;
1500 
1501   /* Look for the common insn sequence, part the first ...  */
1502   if (!outgoing_edges_match (mode, src1, src2))
1503     return false;
1504 
1505   /* ... and part the second.  */
1506   nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1507 
1508   /* Don't proceed with the crossjump unless we found a sufficient number
1509      of matching instructions or the 'from' block was totally matched
1510      (such that its predecessors will hopefully be redirected and the
1511      block removed).  */
1512   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1513       && (newpos1 != BB_HEAD (src1)))
1514     return false;
1515 
1516   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1517   if (block_has_preserve_label (e1->dest)
1518       && (e1->flags & EDGE_ABNORMAL))
1519     return false;
1520 
1521   /* Here we know that the insns at the end of SRC1 which are common with
1522      SRC2 will be deleted.
1523      If we have tablejumps at the end of SRC1 and SRC2, they have already
1524      been compared for equivalence in outgoing_edges_match (), so replace
1525      the references to LABEL1 with references to LABEL2.  */
1526     {
1527       rtx label1, label2;
1528       rtx table1, table2;
1529 
1530       if (tablejump_p (BB_END (src1), &label1, &table1)
1531 	  && tablejump_p (BB_END (src2), &label2, &table2)
1532 	  && label1 != label2)
1533 	{
1534 	  replace_label_data rr;
1535 	  rtx insn;
1536 
1537 	  /* Replace references to LABEL1 with LABEL2.  */
1538 	  rr.r1 = label1;
1539 	  rr.r2 = label2;
1540 	  rr.update_label_nuses = true;
1541 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1542 	    {
1543 	      /* Do not replace the label in SRC1->END because when deleting
1544 		 a block whose end is a tablejump, the tablejump referenced
1545 		 from the instruction is deleted too.  */
1546 	      if (insn != BB_END (src1))
1547 		for_each_rtx (&insn, replace_label, &rr);
1548 	    }
1549 	}
1550     }
1551 
1552   /* Avoid splitting if possible.  We must always split when SRC2 has
1553      EH predecessor edges, or we may end up with basic blocks with both
1554      normal and EH predecessor edges.  */
1555   if (newpos2 == BB_HEAD (src2)
1556       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1557     redirect_to = src2;
1558   else
1559     {
1560       if (newpos2 == BB_HEAD (src2))
1561 	{
1562 	  /* Skip possible basic block header.  */
1563 	  if (LABEL_P (newpos2))
1564 	    newpos2 = NEXT_INSN (newpos2);
1565 	  while (DEBUG_INSN_P (newpos2))
1566 	    newpos2 = NEXT_INSN (newpos2);
1567 	  if (NOTE_P (newpos2))
1568 	    newpos2 = NEXT_INSN (newpos2);
1569 	  while (DEBUG_INSN_P (newpos2))
1570 	    newpos2 = NEXT_INSN (newpos2);
1571 	}
1572 
1573       if (dump_file)
1574 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
1575 		 src2->index, nmatch);
1576       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1577     }
1578 
1579   if (dump_file)
1580     fprintf (dump_file,
1581 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
1582 	     src1->index, src2->index, nmatch);
1583 
1584   /* We may have some registers visible through the block.  */
1585   df_set_bb_dirty (redirect_to);
1586 
1587   /* Recompute the frequencies and counts of outgoing edges.  */
1588   FOR_EACH_EDGE (s, ei, redirect_to->succs)
1589     {
1590       edge s2;
1591       edge_iterator ei;
1592       basic_block d = s->dest;
1593 
1594       if (FORWARDER_BLOCK_P (d))
1595 	d = single_succ (d);
1596 
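      /* Find the edge out of SRC1 that reaches the same (possibly
         forwarded) destination as S; outgoing_edges_match has already
         verified that such an edge exists.  */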
1597       FOR_EACH_EDGE (s2, ei, src1->succs)
1598 	{
1599 	  basic_block d2 = s2->dest;
1600 	  if (FORWARDER_BLOCK_P (d2))
1601 	    d2 = single_succ (d2);
1602 	  if (d == d2)
1603 	    break;
1604 	}
1605 
1606       s->count += s2->count;
1607 
1608       /* Take care to update possible forwarder blocks.  We verified
1609 	 that there is no more than one in the chain, so we can't run
1610 	 into an infinite loop.  */
1611       if (FORWARDER_BLOCK_P (s->dest))
1612 	{
1613 	  single_succ_edge (s->dest)->count += s2->count;
1614 	  s->dest->count += s2->count;
1615 	  s->dest->frequency += EDGE_FREQUENCY (s);
1616 	}
1617 
1618       if (FORWARDER_BLOCK_P (s2->dest))
1619 	{
1620 	  single_succ_edge (s2->dest)->count -= s2->count;
1621 	  if (single_succ_edge (s2->dest)->count < 0)
1622 	    single_succ_edge (s2->dest)->count = 0;
1623 	  s2->dest->count -= s2->count;
1624 	  s2->dest->frequency -= EDGE_FREQUENCY (s);
1625 	  if (s2->dest->frequency < 0)
1626 	    s2->dest->frequency = 0;
1627 	  if (s2->dest->count < 0)
1628 	    s2->dest->count = 0;
1629 	}
1630 
1631       if (!redirect_to->frequency && !src1->frequency)
1632 	s->probability = (s->probability + s2->probability) / 2;
1633       else
1634 	s->probability
1635 	  = ((s->probability * redirect_to->frequency +
1636 	      s2->probability * src1->frequency)
1637 	     / (redirect_to->frequency + src1->frequency));
1638     }
1639 
1640   /* Adjust count and frequency for the block.  An earlier jump
1641      threading pass may have left the profile in an inconsistent
1642      state (see update_bb_profile_for_threading) so we must be
1643      prepared for overflows.  */
1644   redirect_to->count += src1->count;
1645   redirect_to->frequency += src1->frequency;
1646   if (redirect_to->frequency > BB_FREQ_MAX)
1647     redirect_to->frequency = BB_FREQ_MAX;
1648   update_br_prob_note (redirect_to);
1649 
1650   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
1651 
1652   /* Skip possible basic block header.  */
1653   if (LABEL_P (newpos1))
1654     newpos1 = NEXT_INSN (newpos1);
1655 
1656   while (DEBUG_INSN_P (newpos1))
1657     newpos1 = NEXT_INSN (newpos1);
1658 
1659   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
1660     newpos1 = NEXT_INSN (newpos1);
1661 
1662   while (DEBUG_INSN_P (newpos1))
1663     newpos1 = NEXT_INSN (newpos1);
1664 
1665   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1666   to_remove = single_succ (redirect_from);
1667 
1668   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1669   delete_basic_block (to_remove);
1670 
1671   update_forwarder_flag (redirect_from);
1672   if (redirect_to != src2)
1673     update_forwarder_flag (src2);
1674 
1675   return true;
1676 }
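
/* A source-level illustration of the transformation above (hypothetical
   example, not taken from any test case):

	if (cond)
	  { x = f (); x++; return x; }
	else
	  { x = g (); x++; return x; }

   Both arms end with the same "x++; return x;" sequence.  Cross jumping
   splits that tail into a block of its own and redirects the branch of the
   other arm to it, so the common insns are emitted only once.  */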
1677 
1678 /* Search the predecessors of BB for common insn sequences.  When found,
1679    share code between them by redirecting control flow.  Return true if
1680    any changes were made.  */
1681 
1682 static bool
1683 try_crossjump_bb (int mode, basic_block bb)
1684 {
1685   edge e, e2, fallthru;
1686   bool changed;
1687   unsigned max, ix, ix2;
1688   basic_block ev, ev2;
1689   edge_iterator ei;
1690 
1691   /* Nothing to do if there are not at least two incoming edges.  */
1692   if (EDGE_COUNT (bb->preds) < 2)
1693     return false;
1694 
1695   /* Don't crossjump if this block ends in a computed jump,
1696      unless we are optimizing for size.  */
1697   if (optimize_bb_for_size_p (bb)
1698       && bb != EXIT_BLOCK_PTR
1699       && computed_jump_p (BB_END (bb)))
1700     return false;
1701 
1702   /* If we are partitioning hot/cold basic blocks, we don't want to
1703      mess up unconditional or indirect jumps that cross between hot
1704      and cold sections.
1705 
1706      Basic block partitioning may result in some jumps that appear to
1707      be optimizable (or blocks that appear to be mergeable), but which really
1708      must be left untouched (they are required to make it safely across
1709      partition boundaries).  See the comments at the top of
1710      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1711 
1712   if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1713 					BB_PARTITION (EDGE_PRED (bb, 1)->src)
1714       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1715     return false;
1716 
1717   /* It is always cheapest to redirect a block that ends in a branch to
1718      a block that falls through into BB, as that adds no branches to the
1719      program.  We'll try that combination first.  */
1720   fallthru = NULL;
1721   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1722 
1723   if (EDGE_COUNT (bb->preds) > max)
1724     return false;
1725 
1726   FOR_EACH_EDGE (e, ei, bb->preds)
1727     {
1728       if (e->flags & EDGE_FALLTHRU)
1729 	{
1730 	  fallthru = e;
1731 	  break;
1732 	}
1733     }
1734 
1735   changed = false;
1736   for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1737     {
1738       e = EDGE_PRED (ev, ix);
1739       ix++;
1740 
1741       /* As noted above, first try with the fallthru predecessor (or, in
1742 	 cfglayout mode, with a fallthru predecessor).  */
1743       if (fallthru)
1744 	{
1745 	  /* Don't combine the fallthru edge into anything else.
1746 	     If there is a match, we'll do it the other way around.  */
1747 	  if (e == fallthru)
1748 	    continue;
1749 	  /* If nothing changed since the last attempt, there is nothing
1750 	     we can do.  */
1751 	  if (!first_pass
1752 	      && (!(df_get_bb_dirty (e->src))
1753 		  && !(df_get_bb_dirty (fallthru->src))))
1754 	    continue;
1755 
1756 	  if (try_crossjump_to_edge (mode, e, fallthru))
1757 	    {
1758 	      changed = true;
1759 	      ix = 0;
1760 	      ev = bb;
1761 	      continue;
1762 	    }
1763 	}
1764 
1765       /* Non-obvious work limiting check: Recognize that we're going
1766 	 to call try_crossjump_bb on every basic block.  So if we have
1767 	 two blocks with lots of outgoing edges (a switch) and they
1768 	 share lots of common destinations, then we would do the
1769 	 cross-jump check once for each common destination.
1770 
1771 	 Now, if the blocks actually are cross-jump candidates, then
1772 	 all of their destinations will be shared.  Which means that
1773 	 we only need to check them for cross-jump candidacy once.  We
1774 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1775 	 doing the check only when the edge in question is the first
1776 	 successor edge of its source block A.  */
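      /* E.g. (illustrative): if A and B are both switches sharing the same
	 ten destinations, checking from every shared destination would try
	 the pair ten times; the first-successor test below, together with
	 the index test further down, limits this to a single crossjump(A,B)
	 attempt.  */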
1777       if (EDGE_SUCC (e->src, 0) != e)
1778 	continue;
1779 
1780       for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1781 	{
1782 	  e2 = EDGE_PRED (ev2, ix2);
1783 	  ix2++;
1784 
1785 	  if (e2 == e)
1786 	    continue;
1787 
1788 	  /* We've already checked the fallthru edge above.  */
1789 	  if (e2 == fallthru)
1790 	    continue;
1791 
1792 	  /* The "first successor" check above only prevents multiple
1793 	     checks of crossjump(A,B).  In order to prevent redundant
1794 	     checks of crossjump(B,A), require that A be the block
1795 	     with the lowest index.  */
1796 	  if (e->src->index > e2->src->index)
1797 	    continue;
1798 
1799 	  /* If nothing changed since the last attempt, there is nothing
1800 	     we can do.  */
1801 	  if (!first_pass
1802 	      && (!(df_get_bb_dirty (e->src))
1803 		  && !(df_get_bb_dirty (e2->src))))
1804 	    continue;
1805 
1806 	  if (try_crossjump_to_edge (mode, e, e2))
1807 	    {
1808 	      changed = true;
1809 	      ev2 = bb;
1810 	      ix = 0;
1811 	      break;
1812 	    }
1813 	}
1814     }
1815 
1816   if (changed)
1817     crossjumps_occured = true;
1818 
1819   return changed;
1820 }
1821 
1822 /* Return true if BB contains just a bb note, or a bb note followed
1823    only by DEBUG_INSNs.  */
1824 
1825 static bool
1826 trivially_empty_bb_p (basic_block bb)
1827 {
1828   rtx insn = BB_END (bb);
1829 
1830   while (1)
1831     {
1832       if (insn == BB_HEAD (bb))
1833 	return true;
1834       if (!DEBUG_INSN_P (insn))
1835 	return false;
1836       insn = PREV_INSN (insn);
1837     }
1838 }
1839 
1840 /* Do simple CFG optimizations - basic block merging, simplification of
1841    jump instructions, etc.  Return nonzero if changes were made.  */
1842 
1843 static bool
1844 try_optimize_cfg (int mode)
1845 {
1846   bool changed_overall = false;
1847   bool changed;
1848   int iterations = 0;
1849   basic_block bb, b, next;
1850 
1851   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1852     clear_bb_flags ();
1853 
1854   crossjumps_occured = false;
1855 
1856   FOR_EACH_BB (bb)
1857     update_forwarder_flag (bb);
1858 
1859   if (! targetm.cannot_modify_jumps_p ())
1860     {
1861       first_pass = true;
1862       /* Attempt to merge blocks as made possible by edge removal.  If
1863 	 a block has only one successor, and the successor has only
1864 	 one predecessor, they may be combined.  */
1865       do
1866 	{
1867 	  changed = false;
1868 	  iterations++;
1869 
1870 	  if (dump_file)
1871 	    fprintf (dump_file,
1872 		     "\n\ntry_optimize_cfg iteration %i\n\n",
1873 		     iterations);
1874 
1875 	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1876 	    {
1877 	      basic_block c;
1878 	      edge s;
1879 	      bool changed_here = false;
1880 
1881 	      /* Delete trivially dead basic blocks.  These are either
1882 		 blocks with no predecessors, or empty blocks with no
1883 		 successors.  However, if an empty block with no
1884 		 successors is the successor of the ENTRY_BLOCK, it is
1885 		 kept.  This ensures that the ENTRY_BLOCK will have a
1886 		 successor, which is a precondition for many RTL
1887 		 passes.  Empty blocks may result from expanding
1888 		 __builtin_unreachable ().  */
1889 	      if (EDGE_COUNT (b->preds) == 0
1890 		  || (EDGE_COUNT (b->succs) == 0
1891 		      && trivially_empty_bb_p (b)
1892 		      && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
1893 		{
1894 		  c = b->prev_bb;
1895 		  if (EDGE_COUNT (b->preds) > 0)
1896 		    {
1897 		      edge e;
1898 		      edge_iterator ei;
1899 
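		      /* If B ends in a barrier, control never falls out of
			 it.  Once B is deleted, its fallthru predecessors
			 therefore need a barrier of their own (moved into
			 their footer in cfglayout mode, or emitted after
			 their last insn otherwise) so that they are not
			 treated as falling through into whatever follows.  */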
1900 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
1901 			{
1902 			  if (b->il.rtl->footer
1903 			      && BARRIER_P (b->il.rtl->footer))
1904 			    FOR_EACH_EDGE (e, ei, b->preds)
1905 			      if ((e->flags & EDGE_FALLTHRU)
1906 				  && e->src->il.rtl->footer == NULL)
1907 				{
1908 				  if (b->il.rtl->footer)
1909 				    {
1910 				      e->src->il.rtl->footer = b->il.rtl->footer;
1911 				      b->il.rtl->footer = NULL;
1912 				    }
1913 				  else
1914 				    {
1915 				      start_sequence ();
1916 				      e->src->il.rtl->footer = emit_barrier ();
1917 				      end_sequence ();
1918 				    }
1919 				}
1920 			}
1921 		      else
1922 			{
1923 			  rtx last = get_last_bb_insn (b);
1924 			  if (last && BARRIER_P (last))
1925 			    FOR_EACH_EDGE (e, ei, b->preds)
1926 			      if ((e->flags & EDGE_FALLTHRU))
1927 				emit_barrier_after (BB_END (e->src));
1928 			}
1929 		    }
1930 		  delete_basic_block (b);
1931 		  changed = true;
1932 		  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
1933 		  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
1934 		  continue;
1935 		}
1936 
1937 	      /* Remove code labels no longer used.  */
1938 	      if (single_pred_p (b)
1939 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1940 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1941 		  && LABEL_P (BB_HEAD (b))
1942 		  /* If the previous block ends with a branch to this
1943 		     block, we can't delete the label.  Normally this
1944 		     is a condjump that is yet to be simplified, but
1945 		     if CASE_DROPS_THRU, this can be a tablejump with
1946 		     some element going to the same place as the
1947 		     default (fallthru).  */
1948 		  && (single_pred (b) == ENTRY_BLOCK_PTR
1949 		      || !JUMP_P (BB_END (single_pred (b)))
1950 		      || ! label_is_jump_target_p (BB_HEAD (b),
1951 						   BB_END (single_pred (b)))))
1952 		{
1953 		  rtx label = BB_HEAD (b);
1954 
1955 		  delete_insn_chain (label, label, false);
1956 		  /* If the case label is undeletable, move it after the
1957 		     BASIC_BLOCK note.  */
1958 		  if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1959 		    {
1960 		      rtx bb_note = NEXT_INSN (BB_HEAD (b));
1961 
1962 		      reorder_insns_nobb (label, label, bb_note);
1963 		      BB_HEAD (b) = bb_note;
1964 		      if (BB_END (b) == bb_note)
1965 			BB_END (b) = label;
1966 		    }
1967 		  if (dump_file)
1968 		    fprintf (dump_file, "Deleted label in block %i.\n",
1969 			     b->index);
1970 		}
1971 
1972 	      /* If we fall through an empty block, we can remove it.  */
1973 	      if (!(mode & CLEANUP_CFGLAYOUT)
1974 		  && single_pred_p (b)
1975 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1976 		  && !LABEL_P (BB_HEAD (b))
1977 		  && FORWARDER_BLOCK_P (b)
1978 		  /* Note that forwarder_block_p true ensures that
1979 		     there is a successor for this block.  */
1980 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
1981 		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
1982 		{
1983 		  if (dump_file)
1984 		    fprintf (dump_file,
1985 			     "Deleting fallthru block %i.\n",
1986 			     b->index);
1987 
1988 		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1989 		  redirect_edge_succ_nodup (single_pred_edge (b),
1990 					    single_succ (b));
1991 		  delete_basic_block (b);
1992 		  changed = true;
1993 		  b = c;
1994 		  continue;
1995 		}
1996 
1997 	      if (single_succ_p (b)
1998 		  && (s = single_succ_edge (b))
1999 		  && !(s->flags & EDGE_COMPLEX)
2000 		  && (c = s->dest) != EXIT_BLOCK_PTR
2001 		  && single_pred_p (c)
2002 		  && b != c)
2003 		{
2004 		  /* When not in cfg_layout mode, use code that is aware of insn
2005 		     reordering.  This code may create new basic blocks, so it
2006 		     does not fit the merge_blocks interface and is kept here in
2007 		     the hope that it will become useless once more of the
2008 		     compiler is converted to use cfg_layout mode.  */
2009 
2010 		  if ((mode & CLEANUP_CFGLAYOUT)
2011 		      && can_merge_blocks_p (b, c))
2012 		    {
2013 		      merge_blocks (b, c);
2014 		      update_forwarder_flag (b);
2015 		      changed_here = true;
2016 		    }
2017 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2018 			   /* If the jump insn has side effects,
2019 			      we can't kill the edge.  */
2020 			   && (!JUMP_P (BB_END (b))
2021 			       || (reload_completed
2022 				   ? simplejump_p (BB_END (b))
2023 				   : (onlyjump_p (BB_END (b))
2024 				      && !tablejump_p (BB_END (b),
2025 						       NULL, NULL))))
2026 			   && (next = merge_blocks_move (s, b, c, mode)))
2027 		      {
2028 			b = next;
2029 			changed_here = true;
2030 		      }
2031 		}
2032 
2033 	      /* Simplify branch over branch.  */
2034 	      if ((mode & CLEANUP_EXPENSIVE)
2035 		   && !(mode & CLEANUP_CFGLAYOUT)
2036 		   && try_simplify_condjump (b))
2037 		changed_here = true;
2038 
2039 	      /* If B has a single outgoing edge, but uses a
2040 		 non-trivial jump instruction without side-effects, we
2041 		 can either delete the jump entirely, or replace it
2042 		 with a simple unconditional jump.  */
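	      /* One common case (illustrative): a conditional jump whose
		 taken and not-taken arms were redirected to the same block
		 by the simplifications above, leaving B with a single
		 successor; the condjump is then replaced by a simplejump,
		 or removed entirely if the target can be reached by falling
		 through.  */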
2043 	      if (single_succ_p (b)
2044 		  && single_succ (b) != EXIT_BLOCK_PTR
2045 		  && onlyjump_p (BB_END (b))
2046 		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2047 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2048 						     single_succ (b),
2049 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2050 		{
2051 		  update_forwarder_flag (b);
2052 		  changed_here = true;
2053 		}
2054 
2055 	      /* Simplify branch to branch.  */
2056 	      if (try_forward_edges (mode, b))
2057 		changed_here = true;
2058 
2059 	      /* Look for shared code between blocks.  */
2060 	      if ((mode & CLEANUP_CROSSJUMP)
2061 		  && try_crossjump_bb (mode, b))
2062 		changed_here = true;
2063 
2064 	      /* Don't get confused by the index shift caused by
2065 		 deleting blocks.  */
2066 	      if (!changed_here)
2067 		b = b->next_bb;
2068 	      else
2069 		changed = true;
2070 	    }
2071 
2072 	  if ((mode & CLEANUP_CROSSJUMP)
2073 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2074 	    changed = true;
2075 
2076 #ifdef ENABLE_CHECKING
2077 	  if (changed)
2078 	    verify_flow_info ();
2079 #endif
2080 
2081 	  changed_overall |= changed;
2082 	  first_pass = false;
2083 	}
2084       while (changed);
2085     }
2086 
2087   FOR_ALL_BB (b)
2088     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2089 
2090   return changed_overall;
2091 }
2092 
2093 /* Delete all unreachable basic blocks.  */
2094 
2095 bool
2096 delete_unreachable_blocks (void)
2097 {
2098   bool changed = false;
2099   basic_block b, prev_bb;
2100 
2101   find_unreachable_blocks ();
2102 
2103   /* When we're in GIMPLE mode and there may be debug insns, we should
2104      delete blocks in reverse dominator order, so as to get a chance
2105      to substitute all released DEFs into debug stmts.  If we don't
2106      have dominators information, walking blocks backward gets us a
2107      better chance of retaining most debug information than
2108      otherwise.  */
2109   if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2110       && dom_info_available_p (CDI_DOMINATORS))
2111     {
2112       for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2113 	{
2114 	  prev_bb = b->prev_bb;
2115 
2116 	  if (!(b->flags & BB_REACHABLE))
2117 	    {
2118 	      /* Speed up the removal of blocks that don't dominate
2119 		 others.  Walking backwards, this should be the common
2120 		 case.  */
2121 	      if (!first_dom_son (CDI_DOMINATORS, b))
2122 		delete_basic_block (b);
2123 	      else
2124 		{
2125 		  VEC (basic_block, heap) *h
2126 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
2127 
2128 		  while (VEC_length (basic_block, h))
2129 		    {
2130 		      b = VEC_pop (basic_block, h);
2131 
2132 		      prev_bb = b->prev_bb;
2133 
2134 		      gcc_assert (!(b->flags & BB_REACHABLE));
2135 
2136 		      delete_basic_block (b);
2137 		    }
2138 
2139 		  VEC_free (basic_block, heap, h);
2140 		}
2141 
2142 	      changed = true;
2143 	    }
2144 	}
2145     }
2146   else
2147     {
2148       for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2149 	{
2150 	  prev_bb = b->prev_bb;
2151 
2152 	  if (!(b->flags & BB_REACHABLE))
2153 	    {
2154 	      delete_basic_block (b);
2155 	      changed = true;
2156 	    }
2157 	}
2158     }
2159 
2160   if (changed)
2161     tidy_fallthru_edges ();
2162   return changed;
2163 }
2164 
2165 /* Delete any jump tables that are never referenced.  We can't delete them
2166    at the time the tablejump insn is removed, as they are referenced by the
2167    preceding insns computing the destination, so we delay the deletion and
2168    garbage-collect them once life information is computed.  */
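/* Roughly, such a leftover looks like this (illustrative RTL, not from a
   real dump):

	(code_label 42 ...)
	(jump_insn ... (addr_vec [(label_ref 50) (label_ref 55) ...]))

   sitting between two basic blocks after the tablejump that indexed the
   table has been deleted.  */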
2169 void
2170 delete_dead_jumptables (void)
2171 {
2172   basic_block bb;
2173 
2174   /* A dead jump table does not belong to any basic block.  Scan insns
2175      between two adjacent basic blocks.  */
2176   FOR_EACH_BB (bb)
2177     {
2178       rtx insn, next;
2179 
2180       for (insn = NEXT_INSN (BB_END (bb));
2181 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2182 	   insn = next)
2183 	{
2184 	  next = NEXT_INSN (insn);
2185 	  if (LABEL_P (insn)
2186 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2187 	      && JUMP_TABLE_DATA_P (next))
2188 	    {
2189 	      rtx label = insn, jump = next;
2190 
2191 	      if (dump_file)
2192 		fprintf (dump_file, "Dead jumptable %i removed\n",
2193 			 INSN_UID (insn));
2194 
2195 	      next = NEXT_INSN (next);
2196 	      delete_insn (jump);
2197 	      delete_insn (label);
2198 	    }
2199 	}
2200     }
2201 }
2202 
2203 
2204 /* Tidy the CFG by deleting unreachable code and whatnot.  */
2205 
2206 bool
2207 cleanup_cfg (int mode)
2208 {
2209   bool changed = false;
2210 
2211   /* Set the cfglayout mode flag here.  We could update all the callers
2212      but that is just inconvenient, especially given that we eventually
2213      want to have cfglayout mode as the default.  */
2214   if (current_ir_type () == IR_RTL_CFGLAYOUT)
2215     mode |= CLEANUP_CFGLAYOUT;
2216 
2217   timevar_push (TV_CLEANUP_CFG);
2218   if (delete_unreachable_blocks ())
2219     {
2220       changed = true;
2221       /* We've possibly created trivially dead code.  Clean it up right
2222 	 now to introduce more opportunities for try_optimize_cfg.  */
2223       if (!(mode & (CLEANUP_NO_INSN_DEL))
2224 	  && !reload_completed)
2225 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
2226     }
2227 
2228   compact_blocks ();
2229 
2230   /* To tail-merge blocks ending in the same noreturn function (e.g.
2231      a call to abort) we have to insert fake edges to exit.  Do this
2232      here once.  The fake edges do not interfere with any other CFG
2233      cleanups.  */
2234   if (mode & CLEANUP_CROSSJUMP)
2235     add_noreturn_fake_exit_edges ();
2236 
2237   if (!dbg_cnt (cfg_cleanup))
2238     return changed;
2239 
2240   while (try_optimize_cfg (mode))
2241     {
2242       delete_unreachable_blocks (), changed = true;
2243       if (!(mode & CLEANUP_NO_INSN_DEL))
2244 	{
2245 	  /* Try to remove some trivially dead insns when doing an expensive
2246 	     cleanup.  But delete_trivially_dead_insns doesn't work after
2247 	     reload (it only handles pseudos) and run_fast_dce is too costly
2248 	     to run in every iteration.
2249 
2250 	     For effective cross jumping, we really want to run a fast DCE to
2251 	     clean up any dead conditions, or they get in the way of performing
2252 	     useful tail merges.
2253 
2254 	     Other transformations in cleanup_cfg are not so sensitive to dead
2255 	     code, so delete_trivially_dead_insns or even doing nothing at all
2256 	     is good enough.  */
2257 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2258 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2259 	    break;
2260 	  else if ((mode & CLEANUP_CROSSJUMP)
2261 		   && crossjumps_occured)
2262 	    run_fast_dce ();
2263 	}
2264       else
2265 	break;
2266     }
2267 
2268   if (mode & CLEANUP_CROSSJUMP)
2269     remove_fake_exit_edges ();
2270 
2271   /* Don't call delete_dead_jumptables in cfglayout mode, because
2272      that function assumes that jump tables are in the insns stream.
2273      But we also don't _have_ to delete dead jumptables in cfglayout
2274      mode because we shouldn't even be looking at things that are
2275      not in a basic block.  Dead jumptables are cleaned up when
2276      going out of cfglayout mode.  */
2277   if (!(mode & CLEANUP_CFGLAYOUT))
2278     delete_dead_jumptables ();
2279 
2280   timevar_pop (TV_CLEANUP_CFG);
2281 
2282   return changed;
2283 }
2284 
2285 static unsigned int
2286 rest_of_handle_jump (void)
2287 {
2288   if (crtl->tail_call_emit)
2289     fixup_tail_calls ();
2290   return 0;
2291 }
2292 
2293 struct rtl_opt_pass pass_jump =
2294 {
2295  {
2296   RTL_PASS,
2297   "sibling",                            /* name */
2298   NULL,                                 /* gate */
2299   rest_of_handle_jump,			/* execute */
2300   NULL,                                 /* sub */
2301   NULL,                                 /* next */
2302   0,                                    /* static_pass_number */
2303   TV_JUMP,                              /* tv_id */
2304   0,                                    /* properties_required */
2305   0,                                    /* properties_provided */
2306   0,                                    /* properties_destroyed */
2307   TODO_ggc_collect,                     /* todo_flags_start */
2308   TODO_verify_flow,                     /* todo_flags_finish */
2309  }
2310 };
2311 
2312 
2313 static unsigned int
2314 rest_of_handle_jump2 (void)
2315 {
2316   delete_trivially_dead_insns (get_insns (), max_reg_num ());
2317   if (dump_file)
2318     dump_flow_info (dump_file, dump_flags);
2319   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2320 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2321   return 0;
2322 }
2323 
2324 
2325 struct rtl_opt_pass pass_jump2 =
2326 {
2327  {
2328   RTL_PASS,
2329   "jump",                               /* name */
2330   NULL,                                 /* gate */
2331   rest_of_handle_jump2,			/* execute */
2332   NULL,                                 /* sub */
2333   NULL,                                 /* next */
2334   0,                                    /* static_pass_number */
2335   TV_JUMP,                              /* tv_id */
2336   0,                                    /* properties_required */
2337   0,                                    /* properties_provided */
2338   0,                                    /* properties_destroyed */
2339   TODO_ggc_collect,                     /* todo_flags_start */
2340   TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
2341  }
2342 };
2343 
2344 
2345