xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/cfgcleanup.c (revision 4f645668ed707e1f969c546666f8c8e45e6f8888)
1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22 
23    - Unreachable blocks removal
24    - Edge forwarding (an edge to a forwarder block is forwarded to its
25      successor.  Simplification of the branch instruction is performed by
26      the underlying infrastructure, so the branch can be converted to a
27      simplejump or eliminated).
28    - Cross jumping (tail merging)
29    - Conditional jump-around-simplejump simplification
30    - Basic block merging.  */
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "memmodel.h"
42 #include "tm_p.h"
43 #include "insn-config.h"
44 #include "emit-rtl.h"
45 #include "cselib.h"
46 #include "params.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "cfgrtl.h"
50 #include "cfganal.h"
51 #include "cfgbuild.h"
52 #include "cfgcleanup.h"
53 #include "dce.h"
54 #include "dbgcnt.h"
55 #include "rtl-iter.h"
56 #include "regs.h"
57 
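/* A forwarder block contains no active code and simply passes control on to
   its single successor (see forwarder_block_p).  The BB_FORWARDER_BLOCK flag
   is maintained by notice_new_block and update_forwarder_flag below.  */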
58 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
59 
60 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
61 static bool first_pass;
62 
63 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
64 static bool crossjumps_occurred;
65 
66 /* Set to true if we couldn't run an optimization due to stale liveness
67    information; we should run df_analyze to enable more opportunities.  */
68 static bool block_was_dirty;
69 
70 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
71 static bool try_crossjump_bb (int, basic_block);
72 static bool outgoing_edges_match (int, basic_block, basic_block);
73 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
74 
75 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
76 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
77 static bool try_optimize_cfg (int);
78 static bool try_simplify_condjump (basic_block);
79 static bool try_forward_edges (int, basic_block);
80 static edge thread_jump (edge, basic_block);
81 static bool mark_effect (rtx, bitmap);
82 static void notice_new_block (basic_block);
83 static void update_forwarder_flag (basic_block);
84 static void merge_memattrs (rtx, rtx);
85 
86 /* Set flags for newly created block.  */
87 
88 static void
89 notice_new_block (basic_block bb)
90 {
91   if (!bb)
92     return;
93 
94   if (forwarder_block_p (bb))
95     bb->flags |= BB_FORWARDER_BLOCK;
96 }
97 
98 /* Recompute forwarder flag after block has been modified.  */
99 
100 static void
101 update_forwarder_flag (basic_block bb)
102 {
103   if (forwarder_block_p (bb))
104     bb->flags |= BB_FORWARDER_BLOCK;
105   else
106     bb->flags &= ~BB_FORWARDER_BLOCK;
107 }
108 
109 /* Simplify a conditional jump around an unconditional jump.
110    Return true if something changed.  */
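/* Illustrative sketch (labels are hypothetical):

       if (cond) goto L2;              if (!cond) goto L3;
       goto L3;                 ==>  L2: ...
     L2: ...

   The conditional branch is inverted so that it targets the destination of
   the unconditional jump, the forwarder block holding that jump is deleted,
   and the code at L2 becomes the fallthru path.  */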
111 
112 static bool
113 try_simplify_condjump (basic_block cbranch_block)
114 {
115   basic_block jump_block, jump_dest_block, cbranch_dest_block;
116   edge cbranch_jump_edge, cbranch_fallthru_edge;
117   rtx_insn *cbranch_insn;
118 
119   /* Verify that there are exactly two successors.  */
120   if (EDGE_COUNT (cbranch_block->succs) != 2)
121     return false;
122 
123   /* Verify that we've got a normal conditional branch at the end
124      of the block.  */
125   cbranch_insn = BB_END (cbranch_block);
126   if (!any_condjump_p (cbranch_insn))
127     return false;
128 
129   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131 
132   /* The next block must not have multiple predecessors, must not
133      be the last block in the function, and must contain just the
134      unconditional jump.  */
135   jump_block = cbranch_fallthru_edge->dest;
136   if (!single_pred_p (jump_block)
137       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
138       || !FORWARDER_BLOCK_P (jump_block))
139     return false;
140   jump_dest_block = single_succ (jump_block);
141 
142   /* If we are partitioning hot/cold basic blocks, we don't want to
143      mess up unconditional or indirect jumps that cross between hot
144      and cold sections.
145 
146      Basic block partitioning may result in some jumps that appear to
147      be optimizable (or blocks that appear to be mergeable), but which really
148      must be left untouched (they are required to make it safely across
149      partition boundaries).  See the comments at the top of
150      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
151 
152   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153       || (cbranch_jump_edge->flags & EDGE_CROSSING))
154     return false;
155 
156   /* The conditional branch must target the block after the
157      unconditional branch.  */
158   cbranch_dest_block = cbranch_jump_edge->dest;
159 
160   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
161       || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
162       || !can_fallthru (jump_block, cbranch_dest_block))
163     return false;
164 
165   /* Invert the conditional branch.  */
166   if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
167 		    block_label (jump_dest_block), 0))
168     return false;
169 
170   if (dump_file)
171     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
172 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
173 
174   /* Success.  Update the CFG to match.  Note that after this point
175      the edge variable names appear backwards; the redirection is done
176      this way to preserve edge profile data.  */
177   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
178 						cbranch_dest_block);
179   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
180 						    jump_dest_block);
181   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
182   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
183   update_br_prob_note (cbranch_block);
184 
185   /* Delete the block with the unconditional jump, and clean up the mess.  */
186   delete_basic_block (jump_block);
187   tidy_fallthru_edge (cbranch_jump_edge);
188   update_forwarder_flag (cbranch_block);
189 
190   return true;
191 }
192 
193 /* Attempt to prove that the operation is a no-op using cselib, or mark its
194    effect on the register.  Used by jump threading.  */
195 
196 static bool
197 mark_effect (rtx exp, regset nonequal)
198 {
199   rtx dest;
200   switch (GET_CODE (exp))
201     {
202       /* In case we do clobber the register, mark it as equal, as we know the
203 	 value is dead so it doesn't have to match.  */
204     case CLOBBER:
205       dest = XEXP (exp, 0);
206       if (REG_P (dest))
207 	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
208       return false;
209 
210     case SET:
211       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
212 	return false;
213       dest = SET_DEST (exp);
214       if (dest == pc_rtx)
215 	return false;
216       if (!REG_P (dest))
217 	return true;
218       bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
219       return false;
220 
221     default:
222       return false;
223     }
224 }
225 
226 /* Return true if X contains a register in NONEQUAL.  */
227 static bool
228 mentions_nonequal_regs (const_rtx x, regset nonequal)
229 {
230   subrtx_iterator::array_type array;
231   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
232     {
233       const_rtx x = *iter;
234       if (REG_P (x))
235 	{
236 	  unsigned int end_regno = END_REGNO (x);
237 	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
238 	    if (REGNO_REG_SET_P (nonequal, regno))
239 	      return true;
240 	}
241     }
242   return false;
243 }
244 
245 /* Attempt to prove that the basic block B will have no side effects and
246    will always continue along the same edge if reached via E.  Return that
247    edge if it exists, NULL otherwise.  */
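/* For example (an illustrative case only): suppose E is taken from a block
   ending in "if (x == 0)" exactly when that condition holds, and B has no
   side effects and ends in "if (x <= 0) goto L".  Since x == 0 implies
   x <= 0 and x is not modified in between, the branch in B is already
   decided, and the edge from B towards L is returned.  */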
248 
249 static edge
250 thread_jump (edge e, basic_block b)
251 {
252   rtx set1, set2, cond1, cond2;
253   rtx_insn *insn;
254   enum rtx_code code1, code2, reversed_code2;
255   bool reverse1 = false;
256   unsigned i;
257   regset nonequal;
258   bool failed = false;
259   reg_set_iterator rsi;
260 
261   /* Jump threading may cause fixup_partitions to introduce new crossing edges,
262      which is not allowed after reload.  */
263   gcc_checking_assert (!reload_completed || !crtl->has_bb_partition);
264 
265   if (b->flags & BB_NONTHREADABLE_BLOCK)
266     return NULL;
267 
268   /* At the moment we handle only conditional jumps, but later we may
269      want to extend this code to tablejumps and others.  */
270   if (EDGE_COUNT (e->src->succs) != 2)
271     return NULL;
272   if (EDGE_COUNT (b->succs) != 2)
273     {
274       b->flags |= BB_NONTHREADABLE_BLOCK;
275       return NULL;
276     }
277 
278   /* Second branch must end with onlyjump, as we will eliminate the jump.  */
279   if (!any_condjump_p (BB_END (e->src)))
280     return NULL;
281 
282   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
283     {
284       b->flags |= BB_NONTHREADABLE_BLOCK;
285       return NULL;
286     }
287 
288   set1 = pc_set (BB_END (e->src));
289   set2 = pc_set (BB_END (b));
290   if (((e->flags & EDGE_FALLTHRU) != 0)
291       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
292     reverse1 = true;
293 
294   cond1 = XEXP (SET_SRC (set1), 0);
295   cond2 = XEXP (SET_SRC (set2), 0);
296   if (reverse1)
297     code1 = reversed_comparison_code (cond1, BB_END (e->src));
298   else
299     code1 = GET_CODE (cond1);
300 
301   code2 = GET_CODE (cond2);
302   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
303 
304   if (!comparison_dominates_p (code1, code2)
305       && !comparison_dominates_p (code1, reversed_code2))
306     return NULL;
307 
308   /* Ensure that the comparison operands are equivalent.
309      ??? This is far too pessimistic.  We should allow swapped operands,
310      different CCmodes, or for example interval comparisons that
311      dominate even when the operands are not equivalent.  */
312   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
313       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
314     return NULL;
315 
316   /* Punt if BB_END (e->src) is doloop-like conditional jump that modifies
317      the registers used in cond1.  */
318   if (modified_in_p (cond1, BB_END (e->src)))
319     return NULL;
320 
321   /* Short circuit cases where block B contains some side effects, as we can't
322      safely bypass it.  */
323   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
324        insn = NEXT_INSN (insn))
325     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
326       {
327 	b->flags |= BB_NONTHREADABLE_BLOCK;
328 	return NULL;
329       }
330 
331   cselib_init (0);
332 
333   /* First process all values computed in the source basic block.  */
334   for (insn = NEXT_INSN (BB_HEAD (e->src));
335        insn != NEXT_INSN (BB_END (e->src));
336        insn = NEXT_INSN (insn))
337     if (INSN_P (insn))
338       cselib_process_insn (insn);
339 
340   nonequal = BITMAP_ALLOC (NULL);
341   CLEAR_REG_SET (nonequal);
342 
343   /* Now assume that we've continued along the edge E to B and continue
344      processing as if it were the same basic block.
345      Our goal is to prove that the whole block is a no-op.  */
346 
347   for (insn = NEXT_INSN (BB_HEAD (b));
348        insn != NEXT_INSN (BB_END (b)) && !failed;
349        insn = NEXT_INSN (insn))
350     {
351       /* cond2 must not mention any register whose value is not known to be
352 	 equal to its value in the former block.  Check this before processing
353 	 the instruction, as BB_END (b) could also contain clobbers.  */
354       if (insn == BB_END (b)
355 	  && mentions_nonequal_regs (cond2, nonequal))
356 	goto failed_exit;
357 
358       if (INSN_P (insn))
359 	{
360 	  rtx pat = PATTERN (insn);
361 
362 	  if (GET_CODE (pat) == PARALLEL)
363 	    {
364 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
365 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
366 	    }
367 	  else
368 	    failed |= mark_effect (pat, nonequal);
369 	}
370 
371       cselib_process_insn (insn);
372     }
373 
374   /* Later we should clear nonequal of dead registers.  So far we don't
375      have liveness information in cfg_cleanup.  */
376   if (failed)
377     {
378       b->flags |= BB_NONTHREADABLE_BLOCK;
379       goto failed_exit;
380     }
381 
382   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
383     goto failed_exit;
384 
385   BITMAP_FREE (nonequal);
386   cselib_finish ();
387   if ((comparison_dominates_p (code1, code2) != 0)
388       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
389     return BRANCH_EDGE (b);
390   else
391     return FALLTHRU_EDGE (b);
392 
393 failed_exit:
394   BITMAP_FREE (nonequal);
395   cselib_finish ();
396   return NULL;
397 }
398 
399 /* Attempt to forward edges leaving basic block B.
400    Return true if successful.  */
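/* That is, an edge leaving B whose destination starts a chain of forwarder
   blocks F1 -> ... -> T is redirected straight to T; with CLEANUP_THREADING
   the walk may also step across blocks whose conditional outcome is already
   known on this edge (see thread_jump above).  */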
401 
402 static bool
403 try_forward_edges (int mode, basic_block b)
404 {
405   bool changed = false;
406   edge_iterator ei;
407   edge e, *threaded_edges = NULL;
408 
409   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
410     {
411       basic_block target, first;
412       location_t goto_locus;
413       int counter;
414       bool threaded = false;
415       int nthreaded_edges = 0;
416       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
417       bool new_target_threaded = false;
418 
419       /* Skip complex edges because we don't know how to update them.
420 
421 	 Still handle fallthru edges, as we may succeed in forwarding a fallthru
422 	 edge to the same place as the branch edge of a conditional branch,
423 	 turning the conditional branch into an unconditional branch.  */
424       if (e->flags & EDGE_COMPLEX)
425 	{
426 	  ei_next (&ei);
427 	  continue;
428 	}
429 
430       target = first = e->dest;
431       counter = NUM_FIXED_BLOCKS;
432       goto_locus = e->goto_locus;
433 
434       while (counter < n_basic_blocks_for_fn (cfun))
435 	{
436 	  basic_block new_target = NULL;
437 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
438 
439 	  if (FORWARDER_BLOCK_P (target)
440 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
441 	    {
442 	      /* Bypass trivial infinite loops.  */
443 	      new_target = single_succ (target);
444 	      if (target == new_target)
445 		counter = n_basic_blocks_for_fn (cfun);
446 	      else if (!optimize)
447 		{
448 		  /* When not optimizing, ensure that edges or forwarder
449 		     blocks with different locus are not optimized out.  */
450 		  location_t new_locus = single_succ_edge (target)->goto_locus;
451 		  location_t locus = goto_locus;
452 
453 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
454 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
455 		      && new_locus != locus)
456 		    new_target = NULL;
457 		  else
458 		    {
459 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
460 			locus = new_locus;
461 
462 		      rtx_insn *last = BB_END (target);
463 		      if (DEBUG_INSN_P (last))
464 			last = prev_nondebug_insn (last);
465 		      if (last && INSN_P (last))
466 			new_locus = INSN_LOCATION (last);
467 		      else
468 			new_locus = UNKNOWN_LOCATION;
469 
470 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
471 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
472 			  && new_locus != locus)
473 			new_target = NULL;
474 		      else
475 			{
476 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
477 			    locus = new_locus;
478 
479 			  goto_locus = locus;
480 			}
481 		    }
482 		}
483 	    }
484 
485 	  /* Thread over only one edge at a time, to simplify updating
486 	     of probabilities.  */
487 	  else if ((mode & CLEANUP_THREADING) && may_thread)
488 	    {
489 	      edge t = thread_jump (e, target);
490 	      if (t)
491 		{
492 		  if (!threaded_edges)
493 		    threaded_edges = XNEWVEC (edge,
494 					      n_basic_blocks_for_fn (cfun));
495 		  else
496 		    {
497 		      int i;
498 
499 		      /* Detect an infinite loop across blocks not
500 			 including the start block.  */
501 		      for (i = 0; i < nthreaded_edges; ++i)
502 			if (threaded_edges[i] == t)
503 			  break;
504 		      if (i < nthreaded_edges)
505 			{
506 			  counter = n_basic_blocks_for_fn (cfun);
507 			  break;
508 			}
509 		    }
510 
511 		  /* Detect an infinite loop across the start block.  */
512 		  if (t->dest == b)
513 		    break;
514 
515 		  gcc_assert (nthreaded_edges
516 			      < (n_basic_blocks_for_fn (cfun)
517 				 - NUM_FIXED_BLOCKS));
518 		  threaded_edges[nthreaded_edges++] = t;
519 
520 		  new_target = t->dest;
521 		  new_target_threaded = true;
522 		}
523 	    }
524 
525 	  if (!new_target)
526 	    break;
527 
528 	  counter++;
529 	  /* Do not turn a non-crossing jump into a crossing one.  Depending on
530 	     the target it may require a different instruction pattern.  */
531 	  if ((e->flags & EDGE_CROSSING)
532 	      || BB_PARTITION (first) == BB_PARTITION (new_target))
533 	    {
534 	      target = new_target;
535 	      threaded |= new_target_threaded;
536 	    }
537 	}
538 
539       if (counter >= n_basic_blocks_for_fn (cfun))
540 	{
541 	  if (dump_file)
542 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
543 		     target->index);
544 	}
545       else if (target == first)
546 	; /* We didn't do anything.  */
547       else
548 	{
549 	  /* Save the values now, as the edge may get removed.  */
550 	  profile_count edge_count = e->count ();
551 	  int n = 0;
552 
553 	  e->goto_locus = goto_locus;
554 
555 	  /* Don't force if target is exit block.  */
556 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
557 	    {
558 	      notice_new_block (redirect_edge_and_branch_force (e, target));
559 	      if (dump_file)
560 		fprintf (dump_file, "Conditionals threaded.\n");
561 	    }
562 	  else if (!redirect_edge_and_branch (e, target))
563 	    {
564 	      if (dump_file)
565 		fprintf (dump_file,
566 			 "Forwarding edge %i->%i to %i failed.\n",
567 			 b->index, e->dest->index, target->index);
568 	      ei_next (&ei);
569 	      continue;
570 	    }
571 
572 	  /* We successfully forwarded the edge.  Now update profile
573 	     data: for each edge we traversed in the chain, remove
574 	     the original edge's execution count.  */
575 	  do
576 	    {
577 	      edge t;
578 
579 	      if (!single_succ_p (first))
580 		{
581 		  gcc_assert (n < nthreaded_edges);
582 		  t = threaded_edges [n++];
583 		  gcc_assert (t->src == first);
584 		  update_bb_profile_for_threading (first, edge_count, t);
585 		  update_br_prob_note (first);
586 		}
587 	      else
588 		{
589 		  first->count -= edge_count;
590 		  /* It is possible that as a result of
591 		     threading we've removed the edge, as it is
592 		     threaded to the fallthru edge.  Avoid
593 		     getting out of sync.  */
594 		  if (n < nthreaded_edges
595 		      && first == threaded_edges [n]->src)
596 		    n++;
597 		  t = single_succ_edge (first);
598 		}
599 
600 	      first = t->dest;
601 	    }
602 	  while (first != target);
603 
604 	  changed = true;
605 	  continue;
606 	}
607       ei_next (&ei);
608     }
609 
610   free (threaded_edges);
611   return changed;
612 }
613 
614 
615 /* Blocks A and B are to be merged into a single block.  A has no incoming
616    fallthru edge, so it can be moved before B without adding or modifying
617    any jumps (aside from the jump from A to B).  */
618 
619 static void
620 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
621 {
622   rtx_insn *barrier;
623 
624   /* If we are partitioning hot/cold basic blocks, we don't want to
625      mess up unconditional or indirect jumps that cross between hot
626      and cold sections.
627 
628      Basic block partitioning may result in some jumps that appear to
629      be optimizable (or blocks that appear to be mergeable), but which really
630      must be left untouched (they are required to make it safely across
631      partition boundaries).  See the comments at the top of
632      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
633 
634   if (BB_PARTITION (a) != BB_PARTITION (b))
635     return;
636 
637   barrier = next_nonnote_insn (BB_END (a));
638   gcc_assert (BARRIER_P (barrier));
639   delete_insn (barrier);
640 
641   /* Scramble the insn chain.  */
642   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
643     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
644   df_set_bb_dirty (a);
645 
646   if (dump_file)
647     fprintf (dump_file, "Moved block %d before %d and merged.\n",
648 	     a->index, b->index);
649 
650   /* Swap the records for the two blocks around.  */
651 
652   unlink_block (a);
653   link_block (a, b->prev_bb);
654 
655   /* Now blocks A and B are contiguous.  Merge them.  */
656   merge_blocks (a, b);
657 }
658 
659 /* Blocks A and B are to be merged into a single block.  B has no outgoing
660    fallthru edge, so it can be moved after A without adding or modifying
661    any jumps (aside from the jump from A to B).  */
662 
663 static void
664 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
665 {
666   rtx_insn *barrier, *real_b_end;
667   rtx_insn *label;
668   rtx_jump_table_data *table;
669 
670   /* If we are partitioning hot/cold basic blocks, we don't want to
671      mess up unconditional or indirect jumps that cross between hot
672      and cold sections.
673 
674      Basic block partitioning may result in some jumps that appear to
675      be optimizable (or blocks that appear to be mergeable), but which really
676      must be left untouched (they are required to make it safely across
677      partition boundaries).  See the comments at the top of
678      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
679 
680   if (BB_PARTITION (a) != BB_PARTITION (b))
681     return;
682 
683   real_b_end = BB_END (b);
684 
685   /* If there is a jump table following block B, temporarily add the jump table
686      to block B so that it will also be moved to the correct location.  */
687   if (tablejump_p (BB_END (b), &label, &table)
688       && prev_active_insn (label) == BB_END (b))
689     {
690       BB_END (b) = table;
691     }
692 
693   /* There had better have been a barrier there.  Delete it.  */
694   barrier = NEXT_INSN (BB_END (b));
695   if (barrier && BARRIER_P (barrier))
696     delete_insn (barrier);
697 
698 
699   /* Scramble the insn chain.  */
700   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
701 
702   /* Restore the real end of b.  */
703   BB_END (b) = real_b_end;
704 
705   if (dump_file)
706     fprintf (dump_file, "Moved block %d after %d and merged.\n",
707 	     b->index, a->index);
708 
709   /* Now blocks A and B are contiguous.  Merge them.  */
710   merge_blocks (a, b);
711 }
712 
713 /* Attempt to merge basic blocks that are potentially non-adjacent.
714    Return NULL iff the attempt failed, otherwise return the basic block
715    where cleanup_cfg should continue.  Because the merging commonly
716    moves a basic block away or introduces another optimization
717    possibility, return the basic block just before B so cleanup_cfg
718    doesn't need to iterate.
719 
720    It may be a good idea to return the basic block before C in the case
721    that C has been moved after B and originally appeared earlier in the
722    insn sequence, but we have no information available about the
723    relative ordering of these two.  Hopefully it is not too common.  */
724 
725 static basic_block
726 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
727 {
728   basic_block next;
729 
730   /* If we are partitioning hot/cold basic blocks, we don't want to
731      mess up unconditional or indirect jumps that cross between hot
732      and cold sections.
733 
734      Basic block partitioning may result in some jumps that appear to
735      be optimizable (or blocks that appear to be mergeable), but which really
736      must be left untouched (they are required to make it safely across
737      partition boundaries).  See the comments at the top of
738      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
739 
740   if (BB_PARTITION (b) != BB_PARTITION (c))
741     return NULL;
742 
743   /* If B has a fallthru edge to C, no need to move anything.  */
744   if (e->flags & EDGE_FALLTHRU)
745     {
746       int b_index = b->index, c_index = c->index;
747 
748       /* Protect the loop latches.  */
749       if (current_loops && c->loop_father->latch == c)
750 	return NULL;
751 
752       merge_blocks (b, c);
753       update_forwarder_flag (b);
754 
755       if (dump_file)
756 	fprintf (dump_file, "Merged %d and %d without moving.\n",
757 		 b_index, c_index);
758 
759       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
760     }
761 
762   /* Otherwise we will need to move code around.  Do that only if expensive
763      transformations are allowed.  */
764   else if (mode & CLEANUP_EXPENSIVE)
765     {
766       edge tmp_edge, b_fallthru_edge;
767       bool c_has_outgoing_fallthru;
768       bool b_has_incoming_fallthru;
769 
770       /* Avoid overactive code motion, as the forwarder blocks should be
771 	 eliminated by edge redirection instead.  One exception might have
772 	 been if B is a forwarder block and C has no fallthru edge, but
773 	 that should be cleaned up by bb-reorder instead.  */
774       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
775 	return NULL;
776 
777       /* We must make sure not to munge the nesting of lexical blocks,
778 	 and loop notes.  This is done by squeezing out all the notes
779 	 and leaving them there to lie.  Not ideal, but functional.  */
780 
781       tmp_edge = find_fallthru_edge (c->succs);
782       c_has_outgoing_fallthru = (tmp_edge != NULL);
783 
784       tmp_edge = find_fallthru_edge (b->preds);
785       b_has_incoming_fallthru = (tmp_edge != NULL);
786       b_fallthru_edge = tmp_edge;
787       next = b->prev_bb;
788       if (next == c)
789 	next = next->prev_bb;
790 
791       /* Otherwise, we're going to try to move C after B.  If C does
792 	 not have an outgoing fallthru, then it can be moved
793 	 immediately after B without introducing or modifying jumps.  */
794       if (! c_has_outgoing_fallthru)
795 	{
796 	  merge_blocks_move_successor_nojumps (b, c);
797 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
798 	}
799 
800       /* If B does not have an incoming fallthru, then it can be moved
801 	 immediately before C without introducing or modifying jumps.
802 	 C cannot be the first block, so we do not have to worry about
803 	 accessing a non-existent block.  */
804 
805       if (b_has_incoming_fallthru)
806 	{
807 	  basic_block bb;
808 
809 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
810 	    return NULL;
811 	  bb = force_nonfallthru (b_fallthru_edge);
812 	  if (bb)
813 	    notice_new_block (bb);
814 	}
815 
816       merge_blocks_move_predecessor_nojumps (b, c);
817       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
818     }
819 
820   return NULL;
821 }
822 
823 
824 /* Remove the memory attributes of corresponding MEM expressions in X and Y
825    wherever they are not equal.  */
826 
827 static void
828 merge_memattrs (rtx x, rtx y)
829 {
830   int i;
831   int j;
832   enum rtx_code code;
833   const char *fmt;
834 
835   if (x == y)
836     return;
837   if (x == 0 || y == 0)
838     return;
839 
840   code = GET_CODE (x);
841 
842   if (code != GET_CODE (y))
843     return;
844 
845   if (GET_MODE (x) != GET_MODE (y))
846     return;
847 
848   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
849     {
850       if (! MEM_ATTRS (x))
851 	MEM_ATTRS (y) = 0;
852       else if (! MEM_ATTRS (y))
853 	MEM_ATTRS (x) = 0;
854       else
855 	{
856 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
857 	    {
858 	      set_mem_alias_set (x, 0);
859 	      set_mem_alias_set (y, 0);
860 	    }
861 
862 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
863 	    {
864 	      set_mem_expr (x, 0);
865 	      set_mem_expr (y, 0);
866 	      clear_mem_offset (x);
867 	      clear_mem_offset (y);
868 	    }
869 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
870 		   || (MEM_OFFSET_KNOWN_P (x)
871 		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
872 	    {
873 	      clear_mem_offset (x);
874 	      clear_mem_offset (y);
875 	    }
876 
877 	  if (!MEM_SIZE_KNOWN_P (x))
878 	    clear_mem_size (y);
879 	  else if (!MEM_SIZE_KNOWN_P (y))
880 	    clear_mem_size (x);
881 	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
882 	    set_mem_size (x, MEM_SIZE (y));
883 	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
884 	    set_mem_size (y, MEM_SIZE (x));
885 	  else
886 	    {
887 	      /* The sizes aren't ordered, so we can't merge them.  */
888 	      clear_mem_size (x);
889 	      clear_mem_size (y);
890 	    }
891 
892 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
893 	  set_mem_align (y, MEM_ALIGN (x));
894 	}
895     }
896   if (code == MEM)
897     {
898       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
899 	{
900 	  MEM_READONLY_P (x) = 0;
901 	  MEM_READONLY_P (y) = 0;
902 	}
903       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
904 	{
905 	  MEM_NOTRAP_P (x) = 0;
906 	  MEM_NOTRAP_P (y) = 0;
907 	}
908       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
909 	{
910 	  MEM_VOLATILE_P (x) = 1;
911 	  MEM_VOLATILE_P (y) = 1;
912 	}
913     }
914 
915   fmt = GET_RTX_FORMAT (code);
916   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
917     {
918       switch (fmt[i])
919 	{
920 	case 'E':
921 	  /* Two vectors must have the same length.  */
922 	  if (XVECLEN (x, i) != XVECLEN (y, i))
923 	    return;
924 
925 	  for (j = 0; j < XVECLEN (x, i); j++)
926 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
927 
928 	  break;
929 
930 	case 'e':
931 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
932 	}
933     }
934   return;
935 }
936 
937 
938  /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
939     different single sets S1 and S2.  */
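/* For example (an illustrative, target-specific case): two PARALLELs that
   combine different single sets with the same (clobber (reg:CC flags))
   compare equal here, since only the S1/S2 elements are allowed to differ.  */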
940 
941 static bool
942 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
943 {
944   int i;
945   rtx e1, e2;
946 
947   if (p1 == s1 && p2 == s2)
948     return true;
949 
950   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
951     return false;
952 
953   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
954     return false;
955 
956   for (i = 0; i < XVECLEN (p1, 0); i++)
957     {
958       e1 = XVECEXP (p1, 0, i);
959       e2 = XVECEXP (p2, 0, i);
960       if (e1 == s1 && e2 == s2)
961         continue;
962       if (reload_completed
963           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
964         continue;
965 
966       return false;
967     }
968 
969   return true;
970 }
971 
972 
973 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
974    that is a single_set with a SET_SRC of SRC1.  Similarly
975    for NOTE2/SRC2.
976 
977    So effectively NOTE1/NOTE2 are an alternate form of
978    SRC1/SRC2 respectively.
979 
980    Return nonzero if SRC1 or NOTE1 has the same constant
981    integer value as SRC2 or NOTE2.   Else return zero.  */
982 static int
983 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
984 {
985   if (note1
986       && note2
987       && CONST_INT_P (XEXP (note1, 0))
988       && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
989     return 1;
990 
991   if (!note1
992       && !note2
993       && CONST_INT_P (src1)
994       && CONST_INT_P (src2)
995       && rtx_equal_p (src1, src2))
996     return 1;
997 
998   if (note1
999       && CONST_INT_P (src2)
1000       && rtx_equal_p (XEXP (note1, 0), src2))
1001     return 1;
1002 
1003   if (note2
1004       && CONST_INT_P (src1)
1005       && rtx_equal_p (XEXP (note2, 0), src1))
1006     return 1;
1007 
1008   return 0;
1009 }
1010 
1011 /* Examine register notes on I1 and I2 and return:
1012    - dir_forward if I1 can be replaced by I2, or
1013    - dir_backward if I2 can be replaced by I1, or
1014    - dir_both if both are the case.  */
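/* Illustrative case (hypothetical insns): if I1 is (set (reg A) (reg B))
   carrying a REG_EQUAL note of (const_int 5), and I2 is
   (set (reg A) (const_int 5)), both set A to 5, so I1 can be replaced by I2
   (dir_forward); the reverse is not allowed because B need not hold 5 at
   I2's location.  */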
1015 
1016 static enum replace_direction
1017 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1018 {
1019   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1020   bool c1, c2;
1021 
1022   /* Check for 2 sets.  */
1023   s1 = single_set (i1);
1024   s2 = single_set (i2);
1025   if (s1 == NULL_RTX || s2 == NULL_RTX)
1026     return dir_none;
1027 
1028   /* Check that the 2 sets set the same dest.  */
1029   d1 = SET_DEST (s1);
1030   d2 = SET_DEST (s2);
1031   if (!(reload_completed
1032         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1033     return dir_none;
1034 
1035   /* Find an identical reg_equiv or reg_equal note, which implies that the 2
1036      sets set dest to the same value.  */
1037   note1 = find_reg_equal_equiv_note (i1);
1038   note2 = find_reg_equal_equiv_note (i2);
1039 
1040   src1 = SET_SRC (s1);
1041   src2 = SET_SRC (s2);
1042 
1043   if (!values_equal_p (note1, note2, src1, src2))
1044     return dir_none;
1045 
1046   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1047     return dir_none;
1048 
1049   /* Although the 2 sets set dest to the same value, we cannot replace
1050        (set (dest) (const_int))
1051      by
1052        (set (dest) (reg))
1053      because we don't know if the reg is live and has the same value at the
1054      location of replacement.  */
1055   c1 = CONST_INT_P (src1);
1056   c2 = CONST_INT_P (src2);
1057   if (c1 && c2)
1058     return dir_both;
1059   else if (c2)
1060     return dir_forward;
1061   else if (c1)
1062     return dir_backward;
1063 
1064   return dir_none;
1065 }
1066 
1067 /* Merges directions A and B.  */
1068 
1069 static enum replace_direction
1070 merge_dir (enum replace_direction a, enum replace_direction b)
1071 {
1072   /* Implements the following table:
1073         |bo fw bw no
1074      ---+-----------
1075      bo |bo fw bw no
1076      fw |-- fw no no
1077      bw |-- -- bw no
1078      no |-- -- -- no.  */
1079 
1080   if (a == b)
1081     return a;
1082 
1083   if (a == dir_both)
1084     return b;
1085   if (b == dir_both)
1086     return a;
1087 
1088   return dir_none;
1089 }
1090 
1091 /* Array of flags indexed by reg note kind, true if the given
1092    reg note is CFA related.  */
1093 static const bool reg_note_cfa_p[] = {
1094 #undef REG_CFA_NOTE
1095 #define DEF_REG_NOTE(NAME) false,
1096 #define REG_CFA_NOTE(NAME) true,
1097 #include "reg-notes.def"
1098 #undef REG_CFA_NOTE
1099 #undef DEF_REG_NOTE
1100   false
1101 };
1102 
1103 /* Return true if I1 and I2 have identical CFA notes (the same order
1104    and equivalent content).  */
1105 
1106 static bool
1107 insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1108 {
1109   rtx n1, n2;
1110   for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1111        n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1112     {
1113       /* Skip over reg notes not related to CFI information.  */
1114       while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1115 	n1 = XEXP (n1, 1);
1116       while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1117 	n2 = XEXP (n2, 1);
1118       if (n1 == NULL_RTX && n2 == NULL_RTX)
1119 	return true;
1120       if (n1 == NULL_RTX || n2 == NULL_RTX)
1121 	return false;
1122       if (XEXP (n1, 0) == XEXP (n2, 0))
1123 	;
1124       else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1125 	return false;
1126       else if (!(reload_completed
1127 		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1128 		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1129 	return false;
1130     }
1131 }
1132 
1133 /* Examine I1 and I2 and return:
1134    - dir_forward if I1 can be replaced by I2, or
1135    - dir_backward if I2 can be replaced by I1, or
1136    - dir_both if both are the case.  */
1137 
1138 static enum replace_direction
1139 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1140 {
1141   rtx p1, p2;
1142 
1143   /* Verify that I1 and I2 are equivalent.  */
1144   if (GET_CODE (i1) != GET_CODE (i2))
1145     return dir_none;
1146 
1147   /* __builtin_unreachable() may lead to empty blocks (ending with
1148      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1149   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1150     return dir_both;
1151 
1152   /* ??? Do not allow cross-jumping between different stack levels.  */
1153   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1154   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1155   if (p1 && p2)
1156     {
1157       p1 = XEXP (p1, 0);
1158       p2 = XEXP (p2, 0);
1159       if (!rtx_equal_p (p1, p2))
1160         return dir_none;
1161 
1162       /* ??? Worse, this adjustment had better be constant lest we
1163          have differing incoming stack levels.  */
1164       if (!frame_pointer_needed
1165 	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1166 	return dir_none;
1167     }
1168   else if (p1 || p2)
1169     return dir_none;
1170 
1171   /* Do not allow cross-jumping between frame related insns and other
1172      insns.  */
1173   if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1174     return dir_none;
1175 
1176   p1 = PATTERN (i1);
1177   p2 = PATTERN (i2);
1178 
1179   if (GET_CODE (p1) != GET_CODE (p2))
1180     return dir_none;
1181 
1182   /* If this is a CALL_INSN, compare register usage information.
1183      If we don't check this on stack register machines, the two
1184      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1185      numbers of stack registers in the same basic block.
1186      If we don't check this on machines with delay slots, a delay slot may
1187      be filled that clobbers a parameter expected by the subroutine.
1188 
1189      ??? We take the simple route for now and assume that if they're
1190      equal, they were constructed identically.
1191 
1192      Also check for identical exception regions.  */
1193 
1194   if (CALL_P (i1))
1195     {
1196       /* Ensure the same EH region.  */
1197       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1198       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1199 
1200       if (!n1 && n2)
1201 	return dir_none;
1202 
1203       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1204 	return dir_none;
1205 
1206       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1207 			CALL_INSN_FUNCTION_USAGE (i2))
1208 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1209 	return dir_none;
1210 
1211       /* For address sanitizer, never crossjump __asan_report_* builtins,
1212 	 otherwise errors might be reported on incorrect lines.  */
1213       if (flag_sanitize & SANITIZE_ADDRESS)
1214 	{
1215 	  rtx call = get_call_rtx_from (i1);
1216 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1217 	    {
1218 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1219 	      if (SYMBOL_REF_DECL (symbol)
1220 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1221 		{
1222 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1223 		       == BUILT_IN_NORMAL)
1224 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1225 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1226 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1227 			 <= BUILT_IN_ASAN_STOREN)
1228 		    return dir_none;
1229 		}
1230 	    }
1231 	}
1232 
1233       HARD_REG_SET i1_used, i2_used;
1234 
1235       get_call_reg_set_usage (i1, &i1_used, call_used_reg_set);
1236       get_call_reg_set_usage (i2, &i2_used, call_used_reg_set);
1237 
1238       if (!hard_reg_set_equal_p (i1_used, i2_used))
1239         return dir_none;
1240     }
1241 
1242   /* If both i1 and i2 are frame related, verify all the CFA notes
1243      in the same order and with the same content.  */
1244   if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1245     return dir_none;
1246 
1247 #ifdef STACK_REGS
1248   /* If cross_jump_death_matters is not 0, the insn's mode
1249      indicates whether or not the insn contains any stack-like
1250      regs.  */
1251 
1252   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1253     {
1254       /* If register stack conversion has already been done, then
1255 	 death notes must also be compared before it is certain that
1256 	 the two instruction streams match.  */
1257 
1258       rtx note;
1259       HARD_REG_SET i1_regset, i2_regset;
1260 
1261       CLEAR_HARD_REG_SET (i1_regset);
1262       CLEAR_HARD_REG_SET (i2_regset);
1263 
1264       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1265 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1266 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1267 
1268       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1269 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1270 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1271 
1272       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1273 	return dir_none;
1274     }
1275 #endif
1276 
1277   if (reload_completed
1278       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1279     return dir_both;
1280 
1281   return can_replace_by (i1, i2);
1282 }
1283 
1284 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1285    flow_find_head_matching_sequence, ensure the notes match.  */
1286 
1287 static void
1288 merge_notes (rtx_insn *i1, rtx_insn *i2)
1289 {
1290   /* If the merged insns have different REG_EQUAL notes, then
1291      remove them.  */
1292   rtx equiv1 = find_reg_equal_equiv_note (i1);
1293   rtx equiv2 = find_reg_equal_equiv_note (i2);
1294 
1295   if (equiv1 && !equiv2)
1296     remove_note (i1, equiv1);
1297   else if (!equiv1 && equiv2)
1298     remove_note (i2, equiv2);
1299   else if (equiv1 && equiv2
1300 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1301     {
1302       remove_note (i1, equiv1);
1303       remove_note (i2, equiv2);
1304     }
1305 }
1306 
1307  /* Walk backward from I1 in BB1 to the next non-debug insn, and return the
1308     resulting insn in I1 and the corresponding bb in BB1.  At the head of a
1309     bb, if there is a predecessor bb that reaches this bb via fallthru and
1310     FOLLOW_FALLTHRU is set, walk further into the predecessor bb and record
1311     this in DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1312 
1313 static void
1314 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1315                        bool *did_fallthru)
1316 {
1317   edge fallthru;
1318 
1319   *did_fallthru = false;
1320 
1321   /* Ignore notes.  */
1322   while (!NONDEBUG_INSN_P (*i1))
1323     {
1324       if (*i1 != BB_HEAD (*bb1))
1325         {
1326           *i1 = PREV_INSN (*i1);
1327           continue;
1328         }
1329 
1330       if (!follow_fallthru)
1331         return;
1332 
1333       fallthru = find_fallthru_edge ((*bb1)->preds);
1334       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1335           || !single_succ_p (fallthru->src))
1336         return;
1337 
1338       *bb1 = fallthru->src;
1339       *i1 = BB_END (*bb1);
1340       *did_fallthru = true;
1341      }
1342 }
1343 
1344 /* Look through the insns at the end of BB1 and BB2 and find the longest
1345    sequence of insns that are either equivalent or allow forward or backward
1346    replacement.  Store the first insns of that sequence in *F1 and *F2 and
1347    return the sequence length.
1348 
1349    DIR_P indicates the allowed replacement direction on function entry, and
1350    the actual replacement direction on function exit.  If NULL, only equivalent
1351    sequences are allowed.
1352 
1353    To simplify callers of this function, if the blocks match exactly,
1354    store the head of the blocks in *F1 and *F2.  */
1355 
1356 int
1357 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1358 		      rtx_insn **f2, enum replace_direction *dir_p)
1359 {
1360   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1361   int ninsns = 0;
1362   enum replace_direction dir, last_dir, afterlast_dir;
1363   bool follow_fallthru, did_fallthru;
1364 
1365   if (dir_p)
1366     dir = *dir_p;
1367   else
1368     dir = dir_both;
1369   afterlast_dir = dir;
1370   last_dir = afterlast_dir;
1371 
1372   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1373      need to be compared for equivalence, which we'll do below.  */
1374 
1375   i1 = BB_END (bb1);
1376   last1 = afterlast1 = last2 = afterlast2 = NULL;
1377   if (onlyjump_p (i1)
1378       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1379     {
1380       last1 = i1;
1381       i1 = PREV_INSN (i1);
1382     }
1383 
1384   i2 = BB_END (bb2);
1385   if (onlyjump_p (i2)
1386       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1387     {
1388       last2 = i2;
1389       /* Count everything except for unconditional jump as insn.
1390 	 Don't count any jumps if dir_p is NULL.  */
1391       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1392 	ninsns++;
1393       i2 = PREV_INSN (i2);
1394     }
1395 
1396   while (true)
1397     {
1398       /* In the following example, we can replace all jumps to C by jumps to A.
1399 
1400          This removes 4 duplicate insns.
1401          [bb A] insn1            [bb C] insn1
1402                 insn2                   insn2
1403          [bb B] insn3                   insn3
1404                 insn4                   insn4
1405                 jump_insn               jump_insn
1406 
1407          We could also replace all jumps to A by jumps to C, but that leaves B
1408          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1409          step, all jumps to B would be replaced with jumps to the middle of C,
1410          achieving the same result with more effort.
1411          So we allow only the first possibility, which means that we don't allow
1412          fallthru in the block that's being replaced.  */
1413 
1414       follow_fallthru = dir_p && dir != dir_forward;
1415       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1416       if (did_fallthru)
1417         dir = dir_backward;
1418 
1419       follow_fallthru = dir_p && dir != dir_backward;
1420       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1421       if (did_fallthru)
1422         dir = dir_forward;
1423 
1424       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1425 	break;
1426 
1427       /* Do not turn a crossing edge into a non-crossing one or vice versa
1428 	 after reload.  */
1429       if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1430 	  != BB_PARTITION (BLOCK_FOR_INSN (i2))
1431 	  && reload_completed)
1432 	break;
1433 
1434       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1435       if (dir == dir_none || (!dir_p && dir != dir_both))
1436 	break;
1437 
1438       merge_memattrs (i1, i2);
1439 
1440       /* Don't begin a cross-jump with a NOTE insn.  */
1441       if (INSN_P (i1))
1442 	{
1443 	  merge_notes (i1, i2);
1444 
1445 	  afterlast1 = last1, afterlast2 = last2;
1446 	  last1 = i1, last2 = i2;
1447 	  afterlast_dir = last_dir;
1448 	  last_dir = dir;
1449 	  if (active_insn_p (i1))
1450 	    ninsns++;
1451 	}
1452 
1453       i1 = PREV_INSN (i1);
1454       i2 = PREV_INSN (i2);
1455     }
1456 
1457   /* Don't allow the insn after a compare to be shared by
1458      cross-jumping unless the compare is also shared.  */
1459   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1460       && ! sets_cc0_p (last1))
1461     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1462 
1463   /* Include preceding notes and labels in the cross-jump.  One,
1464      this may bring us to the head of the blocks as requested above.
1465      Two, it keeps line number notes as well matched as possible.  */
1466   if (ninsns)
1467     {
1468       bb1 = BLOCK_FOR_INSN (last1);
1469       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1470 	last1 = PREV_INSN (last1);
1471 
1472       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1473 	last1 = PREV_INSN (last1);
1474 
1475       bb2 = BLOCK_FOR_INSN (last2);
1476       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1477 	last2 = PREV_INSN (last2);
1478 
1479       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1480 	last2 = PREV_INSN (last2);
1481 
1482       *f1 = last1;
1483       *f2 = last2;
1484     }
1485 
1486   if (dir_p)
1487     *dir_p = last_dir;
1488   return ninsns;
1489 }
1490 
1491 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1492    the head of the two blocks.  Do not include jumps at the end.
1493    If STOP_AFTER is nonzero, stop after finding that many matching
1494    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1495    nonzero, only count active insns.  */
1496 
1497 int
1498 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1499 				  rtx_insn **f2, int stop_after)
1500 {
1501   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1502   int ninsns = 0;
1503   edge e;
1504   edge_iterator ei;
1505   int nehedges1 = 0, nehedges2 = 0;
1506 
1507   FOR_EACH_EDGE (e, ei, bb1->succs)
1508     if (e->flags & EDGE_EH)
1509       nehedges1++;
1510   FOR_EACH_EDGE (e, ei, bb2->succs)
1511     if (e->flags & EDGE_EH)
1512       nehedges2++;
1513 
1514   i1 = BB_HEAD (bb1);
1515   i2 = BB_HEAD (bb2);
1516   last1 = beforelast1 = last2 = beforelast2 = NULL;
1517 
1518   while (true)
1519     {
1520       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1521       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1522 	{
1523 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1524 	    break;
1525 	  i1 = NEXT_INSN (i1);
1526 	}
1527 
1528       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1529 	{
1530 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1531 	    break;
1532 	  i2 = NEXT_INSN (i2);
1533 	}
1534 
1535       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1536 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1537 	break;
1538 
1539       if (NOTE_P (i1) || NOTE_P (i2)
1540 	  || JUMP_P (i1) || JUMP_P (i2))
1541 	break;
1542 
1543       /* A sanity check to make sure we're not merging insns with different
1544 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1545 	 have an EH edge; if both end a basic block, there should be the same
1546 	 number of EH edges.  */
1547       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1548 	   && nehedges1 > 0)
1549 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1550 	      && nehedges2 > 0)
1551 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1552 	      && nehedges1 != nehedges2))
1553 	break;
1554 
1555       if (old_insns_match_p (0, i1, i2) != dir_both)
1556 	break;
1557 
1558       merge_memattrs (i1, i2);
1559 
1560       /* Don't begin a cross-jump with a NOTE insn.  */
1561       if (INSN_P (i1))
1562 	{
1563 	  merge_notes (i1, i2);
1564 
1565 	  beforelast1 = last1, beforelast2 = last2;
1566 	  last1 = i1, last2 = i2;
1567 	  if (!stop_after || active_insn_p (i1))
1568 	    ninsns++;
1569 	}
1570 
1571       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1572 	  || (stop_after > 0 && ninsns == stop_after))
1573 	break;
1574 
1575       i1 = NEXT_INSN (i1);
1576       i2 = NEXT_INSN (i2);
1577     }
1578 
1579   /* Don't allow a compare to be shared by cross-jumping unless the insn
1580      after the compare is also shared.  */
1581   if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1582       && sets_cc0_p (last1))
1583     last1 = beforelast1, last2 = beforelast2, ninsns--;
1584 
1585   if (ninsns)
1586     {
1587       *f1 = last1;
1588       *f2 = last2;
1589     }
1590 
1591   return ninsns;
1592 }
1593 
1594 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1595    the branch instruction.  This means that if we commonize the control
1596    flow before the end of the basic block, the semantics remain unchanged.
1597 
1598    We may assume that there exists one edge with a common destination.  */
1599 
1600 static bool
1601 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1602 {
1603   int nehedges1 = 0, nehedges2 = 0;
1604   edge fallthru1 = 0, fallthru2 = 0;
1605   edge e1, e2;
1606   edge_iterator ei;
1607 
1608   /* If we performed shrink-wrapping, edges to the exit block can
1609      only be distinguished for JUMP_INSNs.  The two paths may differ in
1610      whether they went through the prologue.  Sibcalls are fine, we know
1611      that we either didn't need or inserted an epilogue before them.  */
1612   if (crtl->shrink_wrapped
1613       && single_succ_p (bb1)
1614       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1615       && (!JUMP_P (BB_END (bb1))
1616 	  /* Punt if the only successor is a fake edge to exit, the jump
1617 	     must be some weird one.  */
1618 	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
1619       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1620     return false;
1621 
1622   /* If BB1 has only one successor, we may be looking at either an
1623      unconditional jump, or a fake edge to exit.  */
1624   if (single_succ_p (bb1)
1625       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1626       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1627     return (single_succ_p (bb2)
1628 	    && (single_succ_edge (bb2)->flags
1629 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1630 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1631 
1632   /* Match conditional jumps - this may get tricky when fallthru and branch
1633      edges are crossed.  */
1634   if (EDGE_COUNT (bb1->succs) == 2
1635       && any_condjump_p (BB_END (bb1))
1636       && onlyjump_p (BB_END (bb1)))
1637     {
1638       edge b1, f1, b2, f2;
1639       bool reverse, match;
1640       rtx set1, set2, cond1, cond2;
1641       enum rtx_code code1, code2;
1642 
1643       if (EDGE_COUNT (bb2->succs) != 2
1644 	  || !any_condjump_p (BB_END (bb2))
1645 	  || !onlyjump_p (BB_END (bb2)))
1646 	return false;
1647 
1648       b1 = BRANCH_EDGE (bb1);
1649       b2 = BRANCH_EDGE (bb2);
1650       f1 = FALLTHRU_EDGE (bb1);
1651       f2 = FALLTHRU_EDGE (bb2);
1652 
1653       /* Get around possible forwarders on fallthru edges.  Other cases
1654 	 should be optimized out already.  */
1655       if (FORWARDER_BLOCK_P (f1->dest))
1656 	f1 = single_succ_edge (f1->dest);
1657 
1658       if (FORWARDER_BLOCK_P (f2->dest))
1659 	f2 = single_succ_edge (f2->dest);
1660 
1661       /* To simplify use of this function, return false if there are
1662 	 unneeded forwarder blocks.  These will get eliminated later
1663 	 during cleanup_cfg.  */
1664       if (FORWARDER_BLOCK_P (f1->dest)
1665 	  || FORWARDER_BLOCK_P (f2->dest)
1666 	  || FORWARDER_BLOCK_P (b1->dest)
1667 	  || FORWARDER_BLOCK_P (b2->dest))
1668 	return false;
1669 
1670       if (f1->dest == f2->dest && b1->dest == b2->dest)
1671 	reverse = false;
1672       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1673 	reverse = true;
1674       else
1675 	return false;
1676 
1677       set1 = pc_set (BB_END (bb1));
1678       set2 = pc_set (BB_END (bb2));
1679       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1680 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1681 	reverse = !reverse;
1682 
1683       cond1 = XEXP (SET_SRC (set1), 0);
1684       cond2 = XEXP (SET_SRC (set2), 0);
1685       code1 = GET_CODE (cond1);
1686       if (reverse)
1687 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1688       else
1689 	code2 = GET_CODE (cond2);
1690 
1691       if (code2 == UNKNOWN)
1692 	return false;
1693 
1694       /* Verify codes and operands match.  */
1695       match = ((code1 == code2
1696 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1697 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1698 	       || (code1 == swap_condition (code2)
1699 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1700 					      XEXP (cond2, 0))
1701 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1702 					      XEXP (cond2, 1))));
1703 
1704       /* If we return true, we will join the blocks.  This means that
1705 	 we will only have one branch prediction bit to work with.  Thus
1706 	 we require the existing branches to have probabilities that are
1707 	 roughly similar.  */
1708       if (match
1709 	  && optimize_bb_for_speed_p (bb1)
1710 	  && optimize_bb_for_speed_p (bb2))
1711 	{
1712 	  profile_probability prob2;
1713 
1714 	  if (b1->dest == b2->dest)
1715 	    prob2 = b2->probability;
1716 	  else
1717 	    /* Do not use f2 probability as f2 may be forwarded.  */
1718 	    prob2 = b2->probability.invert ();
1719 
1720 	  /* Fail if the difference in probabilities is greater than 50%.
1721 	     This rules out two well-predicted branches with opposite
1722 	     outcomes.  */
1723 	  if (b1->probability.differs_lot_from_p (prob2))
1724 	    {
1725 	      if (dump_file)
1726 		{
1727 		  fprintf (dump_file,
1728 			   "Outcomes of branch in bb %i and %i differ too"
1729 			   " much (", bb1->index, bb2->index);
1730 		  b1->probability.dump (dump_file);
1731 		  prob2.dump (dump_file);
1732 		  fprintf (dump_file, ")\n");
1733 		}
1734 	      return false;
1735 	    }
1736 	}
1737 
1738       if (dump_file && match)
1739 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1740 		 bb1->index, bb2->index);
1741 
1742       return match;
1743     }
1744 
1745   /* Generic case - we are seeing a computed jump, table jump or trapping
1746      instruction.  */
1747 
1748   /* Check whether there are tablejumps at the end of BB1 and BB2.
1749      Return true if they are identical.  */
1750     {
1751       rtx_insn *label1, *label2;
1752       rtx_jump_table_data *table1, *table2;
1753 
1754       if (tablejump_p (BB_END (bb1), &label1, &table1)
1755 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1756 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1757 	{
1758 	  /* The labels should never be the same rtx.  If they really are the same,
1759 	     the jump tables are the same too.  So disable crossjumping of blocks BB1
1760 	     and BB2 because when deleting the common insns at the end of BB1
1761 	     by delete_basic_block () the jump table would be deleted too.  */
1762 	  /* If LABEL2 is referenced in BB1->END do not do anything
1763 	     because we would lose information when replacing
1764 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1765 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1766 	    {
1767 	      /* Set IDENTICAL to true when the tables are identical.  */
1768 	      bool identical = false;
1769 	      rtx p1, p2;
1770 
1771 	      p1 = PATTERN (table1);
1772 	      p2 = PATTERN (table2);
1773 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1774 		{
1775 		  identical = true;
1776 		}
1777 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1778 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1779 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1780 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1781 		{
1782 		  int i;
1783 
1784 		  identical = true;
1785 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1786 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1787 		      identical = false;
1788 		}
1789 
1790 	      if (identical)
1791 		{
1792 		  bool match;
1793 
1794 		  /* Temporarily replace references to LABEL1 with LABEL2
1795 		     in BB1->END so that we could compare the instructions.  */
1796 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1797 
1798 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1799 			   == dir_both);
1800 		  if (dump_file && match)
1801 		    fprintf (dump_file,
1802 			     "Tablejumps in bb %i and %i match.\n",
1803 			     bb1->index, bb2->index);
1804 
1805 		  /* Set the original label in BB1->END because when deleting
1806 		     a block whose end is a tablejump, the tablejump referenced
1807 		     from the instruction is deleted too.  */
1808 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1809 
1810 		  return match;
1811 		}
1812 	    }
1813 	  return false;
1814 	}
1815     }
1816 
1817   /* Find the last non-debug non-note instruction in each bb, except
1818      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1819      handles that case specially. old_insns_match_p does not handle
1820      other types of instruction notes.  */
1821   rtx_insn *last1 = BB_END (bb1);
1822   rtx_insn *last2 = BB_END (bb2);
1823   while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1824 	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1825     last1 = PREV_INSN (last1);
1826   while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1827 	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1828     last2 = PREV_INSN (last2);
1829   gcc_assert (last1 && last2);
1830 
1831   /* First ensure that the instructions match.  There may be many outgoing
1832      edges so this test is generally cheaper.  */
1833   if (old_insns_match_p (mode, last1, last2) != dir_both)
1834     return false;
1835 
1836   /* Search the outgoing edges, ensure that the counts do match, find possible
1837      fallthru and exception handling edges since these need more
1838      validation.  */
1839   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1840     return false;
1841 
1842   bool nonfakeedges = false;
1843   FOR_EACH_EDGE (e1, ei, bb1->succs)
1844     {
1845       e2 = EDGE_SUCC (bb2, ei.index);
1846 
1847       if ((e1->flags & EDGE_FAKE) == 0)
1848 	nonfakeedges = true;
1849 
1850       if (e1->flags & EDGE_EH)
1851 	nehedges1++;
1852 
1853       if (e2->flags & EDGE_EH)
1854 	nehedges2++;
1855 
1856       if (e1->flags & EDGE_FALLTHRU)
1857 	fallthru1 = e1;
1858       if (e2->flags & EDGE_FALLTHRU)
1859 	fallthru2 = e2;
1860     }
1861 
1862   /* If the number of edges of various types does not match, fail.  */
1863   if (nehedges1 != nehedges2
1864       || (fallthru1 != 0) != (fallthru2 != 0))
1865     return false;
1866 
1867   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors,
1868      and the last real insn doesn't have a REG_ARGS_SIZE note, don't
1869      attempt to optimize, as the two basic blocks might have different
1870      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1871      traps there should be REG_ARGS_SIZE notes; they could be missing
1872      for __builtin_unreachable () uses though.  */
1873   if (!nonfakeedges
1874       && !ACCUMULATE_OUTGOING_ARGS
1875       && (!INSN_P (last1)
1876           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1877     return false;
1878 
1879   /* Fallthru edges must be forwarded to the same destination.  */
1880   if (fallthru1)
1881     {
1882       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1883 			? single_succ (fallthru1->dest): fallthru1->dest);
1884       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1885 			? single_succ (fallthru2->dest): fallthru2->dest);
1886 
1887       if (d1 != d2)
1888 	return false;
1889     }
1890 
1891   /* Ensure the same EH region.  */
1892   {
1893     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1894     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1895 
1896     if (!n1 && n2)
1897       return false;
1898 
1899     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1900       return false;
1901   }
1902 
1903   /* The same checks as in try_crossjump_to_edge.  They are required for the
1904      RTL version of sequence abstraction.  */
1905   FOR_EACH_EDGE (e1, ei, bb2->succs)
1906     {
1907       edge e2;
1908       edge_iterator ei;
1909       basic_block d1 = e1->dest;
1910 
1911       if (FORWARDER_BLOCK_P (d1))
1912         d1 = EDGE_SUCC (d1, 0)->dest;
1913 
1914       FOR_EACH_EDGE (e2, ei, bb1->succs)
1915         {
1916           basic_block d2 = e2->dest;
1917           if (FORWARDER_BLOCK_P (d2))
1918             d2 = EDGE_SUCC (d2, 0)->dest;
1919           if (d1 == d2)
1920             break;
1921         }
1922 
1923       if (!e2)
1924         return false;
1925     }
1926 
1927   return true;
1928 }
1929 
1930 /* Return true if basic block BB has a preserve label.  */
1931 
1932 static bool
1933 block_has_preserve_label (basic_block bb)
1934 {
1935   return (bb
1936           && block_label (bb)
1937           && LABEL_PRESERVE_P (block_label (bb)));
1938 }
1939 
1940 /* E1 and E2 are edges with the same destination block.  Search their
1941    predecessors for common code.  If found, redirect control flow from
1942    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1943    or the other way around (dir_backward).  DIR specifies the allowed
1944    replacement direction.  */
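     /* For example, when both predecessors end with the same few insns
        followed by equivalent jumps, the source of E1 is split just before
        the common tail, the tail half is deleted, and the remaining half is
        redirected to the matching code in the source of E2.  */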
1945 
1946 static bool
1947 try_crossjump_to_edge (int mode, edge e1, edge e2,
1948                        enum replace_direction dir)
1949 {
1950   int nmatch;
1951   basic_block src1 = e1->src, src2 = e2->src;
1952   basic_block redirect_to, redirect_from, to_remove;
1953   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1954   rtx_insn *newpos1, *newpos2;
1955   edge s;
1956   edge_iterator ei;
1957 
1958   newpos1 = newpos2 = NULL;
1959 
1960   /* Search backward through forwarder blocks.  We don't need to worry
1961      about multiple entry or chained forwarders, as they will be optimized
1962      away.  We do this to look past the unconditional jump following a
1963      conditional jump that is required due to the current CFG shape.  */
1964   if (single_pred_p (src1)
1965       && FORWARDER_BLOCK_P (src1))
1966     e1 = single_pred_edge (src1), src1 = e1->src;
1967 
1968   if (single_pred_p (src2)
1969       && FORWARDER_BLOCK_P (src2))
1970     e2 = single_pred_edge (src2), src2 = e2->src;
1971 
1972   /* Nothing to do if we reach ENTRY, or a common source block.  */
1973   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1974       || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1975     return false;
1976   if (src1 == src2)
1977     return false;
1978 
1979   /* Seeing more than one forwarder block would confuse us later...  */
1980   if (FORWARDER_BLOCK_P (e1->dest)
1981       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1982     return false;
1983 
1984   if (FORWARDER_BLOCK_P (e2->dest)
1985       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1986     return false;
1987 
1988   /* Likewise with dead code (possibly newly created by the other optimizations
1989      of cfg_cleanup).  */
1990   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1991     return false;
1992 
1993   /* Do not turn a crossing edge to non-crossing or vice versa after reload.  */
1994   if (BB_PARTITION (src1) != BB_PARTITION (src2)
1995       && reload_completed)
1996     return false;
1997 
1998   /* Look for the common insn sequence, part the first ...  */
1999   if (!outgoing_edges_match (mode, src1, src2))
2000     return false;
2001 
2002   /* ... and part the second.  */
2003   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
2004 
2005   osrc1 = src1;
2006   osrc2 = src2;
2007   if (newpos1 != NULL_RTX)
2008     src1 = BLOCK_FOR_INSN (newpos1);
2009   if (newpos2 != NULL_RTX)
2010     src2 = BLOCK_FOR_INSN (newpos2);
2011 
2012   /* Check that SRC1 and SRC2 have preds again.  They may have changed
2013      above due to the call to flow_find_cross_jump.  */
2014   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
2015     return false;
2016 
2017   if (dir == dir_backward)
2018     {
2019       std::swap (osrc1, osrc2);
2020       std::swap (src1, src2);
2021       std::swap (e1, e2);
2022       std::swap (newpos1, newpos2);
2023     }
2024 
2025   /* Don't proceed with the crossjump unless we found a sufficient number
2026      of matching instructions or the 'from' block was totally matched
2027      (such that its predecessors will hopefully be redirected and the
2028      block removed).  */
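       /* PARAM_MIN_CROSSJUMP_INSNS corresponds to the
          --param min-crossjump-insns command-line option.  */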
2029   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2030       && (newpos1 != BB_HEAD (src1)))
2031     return false;
2032 
2033   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
2034   if (block_has_preserve_label (e1->dest)
2035       && (e1->flags & EDGE_ABNORMAL))
2036     return false;
2037 
2038   /* Here we know that the insns at the end of SRC1 which are common with SRC2
2039      will be deleted.
2040      If we have tablejumps at the end of SRC1 and SRC2,
2041      they have already been compared for equivalence in outgoing_edges_match (),
2042      so replace the references to TABLE1 by references to TABLE2.  */
2043   {
2044       rtx_insn *label1, *label2;
2045       rtx_jump_table_data *table1, *table2;
2046 
2047       if (tablejump_p (BB_END (osrc1), &label1, &table1)
2048 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2049 	  && label1 != label2)
2050 	{
2051 	  rtx_insn *insn;
2052 
2053 	  /* Replace references to LABEL1 with LABEL2.  */
2054 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2055 	    {
2056 	      /* Do not replace the label in SRC1->END because when deleting
2057 		 a block whose end is a tablejump, the tablejump referenced
2058 		 from the instruction is deleted too.  */
2059 	      if (insn != BB_END (osrc1))
2060 		replace_label_in_insn (insn, label1, label2, true);
2061 	    }
2062 	}
2063   }
2064 
2065   /* Avoid splitting if possible.  We must always split when SRC2 has
2066      EH predecessor edges, or we may end up with basic blocks with both
2067      normal and EH predecessor edges.  */
2068   if (newpos2 == BB_HEAD (src2)
2069       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2070     redirect_to = src2;
2071   else
2072     {
2073       if (newpos2 == BB_HEAD (src2))
2074 	{
2075 	  /* Skip possible basic block header.  */
2076 	  if (LABEL_P (newpos2))
2077 	    newpos2 = NEXT_INSN (newpos2);
2078 	  while (DEBUG_INSN_P (newpos2))
2079 	    newpos2 = NEXT_INSN (newpos2);
2080 	  if (NOTE_P (newpos2))
2081 	    newpos2 = NEXT_INSN (newpos2);
2082 	  while (DEBUG_INSN_P (newpos2))
2083 	    newpos2 = NEXT_INSN (newpos2);
2084 	}
2085 
2086       if (dump_file)
2087 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2088 		 src2->index, nmatch);
2089       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2090     }
2091 
2092   if (dump_file)
2093     fprintf (dump_file,
2094 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2095 	     src1->index, src2->index, nmatch);
2096 
2097   /* We may have some registers visible through the block.  */
2098   df_set_bb_dirty (redirect_to);
2099 
2100   if (osrc2 == src2)
2101     redirect_edges_to = redirect_to;
2102   else
2103     redirect_edges_to = osrc2;
2104 
2105   /* Recompute the counts of destinations of outgoing edges.  */
2106   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2107     {
2108       edge s2;
2109       edge_iterator ei;
2110       basic_block d = s->dest;
2111 
2112       if (FORWARDER_BLOCK_P (d))
2113 	d = single_succ (d);
2114 
2115       FOR_EACH_EDGE (s2, ei, src1->succs)
2116 	{
2117 	  basic_block d2 = s2->dest;
2118 	  if (FORWARDER_BLOCK_P (d2))
2119 	    d2 = single_succ (d2);
2120 	  if (d == d2)
2121 	    break;
2122 	}
2123 
2124       /* Take care to update possible forwarder blocks.  We verified
2125 	 that there is no more than one in the chain, so we can't run
2126 	 into an infinite loop.  */
2127       if (FORWARDER_BLOCK_P (s->dest))
2128 	s->dest->count += s->count ();
2129 
2130       if (FORWARDER_BLOCK_P (s2->dest))
2131 	s2->dest->count -= s->count ();
2132 
2133       s->probability = s->probability.combine_with_count
2134 			  (redirect_edges_to->count,
2135 			   s2->probability, src1->count);
2136     }
2137 
2138   /* Adjust count for the block.  An earlier jump
2139      threading pass may have left the profile in an inconsistent
2140      state (see update_bb_profile_for_threading) so we must be
2141      prepared for overflows.  */
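       /* The loop walks the fallthru chain from REDIRECT_TO up to
          REDIRECT_EDGES_TO, adding SRC1's count to every block that now
          also receives the redirected execution.  */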
2142   tmp = redirect_to;
2143   do
2144     {
2145       tmp->count += src1->count;
2146       if (tmp == redirect_edges_to)
2147         break;
2148       tmp = find_fallthru_edge (tmp->succs)->dest;
2149     }
2150   while (true);
2151   update_br_prob_note (redirect_edges_to);
2152 
2153   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2154 
2155   /* Skip possible basic block header.  */
2156   if (LABEL_P (newpos1))
2157     newpos1 = NEXT_INSN (newpos1);
2158 
2159   while (DEBUG_INSN_P (newpos1))
2160     newpos1 = NEXT_INSN (newpos1);
2161 
2162   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2163     newpos1 = NEXT_INSN (newpos1);
2164 
2165   while (DEBUG_INSN_P (newpos1))
2166     newpos1 = NEXT_INSN (newpos1);
2167 
2168   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2169   to_remove = single_succ (redirect_from);
2170 
2171   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2172   delete_basic_block (to_remove);
2173 
2174   update_forwarder_flag (redirect_from);
2175   if (redirect_to != src2)
2176     update_forwarder_flag (src2);
2177 
2178   return true;
2179 }
2180 
2181 /* Search the predecessors of BB for common insn sequences.  When found,
2182    share code between them by redirecting control flow.  Return true if
2183    any changes were made.  */
2184 
2185 static bool
2186 try_crossjump_bb (int mode, basic_block bb)
2187 {
2188   edge e, e2, fallthru;
2189   bool changed;
2190   unsigned max, ix, ix2;
2191 
2192   /* Nothing to do if there are not at least two incoming edges.  */
2193   if (EDGE_COUNT (bb->preds) < 2)
2194     return false;
2195 
2196   /* Don't crossjump if this block ends in a computed jump,
2197      unless we are optimizing for size.  */
2198   if (optimize_bb_for_size_p (bb)
2199       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2200       && computed_jump_p (BB_END (bb)))
2201     return false;
2202 
2203   /* If we are partitioning hot/cold basic blocks, we don't want to
2204      mess up unconditional or indirect jumps that cross between hot
2205      and cold sections.
2206 
2207      Basic block partitioning may result in some jumps that appear to
2208      be optimizable (or blocks that appear to be mergeable), but which really
2209      must be left untouched (they are required to make it safely across
2210      partition boundaries).  See the comments at the top of
2211      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2212 
2213   if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2214       != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2215       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2216     return false;
2217 
2218   /* It is always cheapest to redirect a block that ends in a branch to
2219      a block that falls through into BB, as that adds no branches to the
2220      program.  We'll try that combination first.  */
2221   fallthru = NULL;
2222   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2223 
2224   if (EDGE_COUNT (bb->preds) > max)
2225     return false;
2226 
2227   fallthru = find_fallthru_edge (bb->preds);
2228 
2229   changed = false;
2230   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2231     {
2232       e = EDGE_PRED (bb, ix);
2233       ix++;
2234 
2235       /* As noted above, first try with the fallthru predecessor (or a
2236 	 fallthru predecessor if we are in cfglayout mode).  */
2237       if (fallthru)
2238 	{
2239 	  /* Don't combine the fallthru edge into anything else.
2240 	     If there is a match, we'll do it the other way around.  */
2241 	  if (e == fallthru)
2242 	    continue;
2243 	  /* If nothing changed since the last attempt, there is nothing
2244 	     we can do.  */
2245 	  if (!first_pass
2246 	      && !((e->src->flags & BB_MODIFIED)
2247 		   || (fallthru->src->flags & BB_MODIFIED)))
2248 	    continue;
2249 
2250 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2251 	    {
2252 	      changed = true;
2253 	      ix = 0;
2254 	      continue;
2255 	    }
2256 	}
2257 
2258       /* Non-obvious work limiting check: Recognize that we're going
2259 	 to call try_crossjump_bb on every basic block.  So if we have
2260 	 two blocks with lots of outgoing edges (a switch) and they
2261 	 share lots of common destinations, then we would do the
2262 	 cross-jump check once for each common destination.
2263 
2264 	 Now, if the blocks actually are cross-jump candidates, then
2265 	 all of their destinations will be shared.  Which means that
2266 	 we only need check them for cross-jump candidacy once.  We
2267 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2268 	 choosing to do the check from the block for which the edge
2269 	 in question is the first successor of A.  */
2270       if (EDGE_SUCC (e->src, 0) != e)
2271 	continue;
2272 
2273       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2274 	{
2275 	  e2 = EDGE_PRED (bb, ix2);
2276 
2277 	  if (e2 == e)
2278 	    continue;
2279 
2280 	  /* We've already checked the fallthru edge above.  */
2281 	  if (e2 == fallthru)
2282 	    continue;
2283 
2284 	  /* The "first successor" check above only prevents multiple
2285 	     checks of crossjump(A,B).  In order to prevent redundant
2286 	     checks of crossjump(B,A), require that A be the block
2287 	     with the lowest index.  */
2288 	  if (e->src->index > e2->src->index)
2289 	    continue;
2290 
2291 	  /* If nothing changed since the last attempt, there is nothing
2292 	     we can do.  */
2293 	  if (!first_pass
2294 	      && !((e->src->flags & BB_MODIFIED)
2295 		   || (e2->src->flags & BB_MODIFIED)))
2296 	    continue;
2297 
2298 	  /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2299 	     direction.  */
2300 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2301 	    {
2302 	      changed = true;
2303 	      ix = 0;
2304 	      break;
2305 	    }
2306 	}
2307     }
2308 
2309   if (changed)
2310     crossjumps_occurred = true;
2311 
2312   return changed;
2313 }
2314 
2315 /* Search the successors of BB for common insn sequences.  When found,
2316    share code between them by moving it across the basic block
2317    boundary.  Return true if any changes were made.  */
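     /* For instance, if every successor of the jump ending BB starts with
        the same insn, that insn can be moved into BB ahead of the jump and
        deleted from each successor.  */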
2318 
2319 static bool
2320 try_head_merge_bb (basic_block bb)
2321 {
2322   basic_block final_dest_bb = NULL;
2323   int max_match = INT_MAX;
2324   edge e0;
2325   rtx_insn **headptr, **currptr, **nextptr;
2326   bool changed, moveall;
2327   unsigned ix;
2328   rtx_insn *e0_last_head;
2329   rtx cond;
2330   rtx_insn *move_before;
2331   unsigned nedges = EDGE_COUNT (bb->succs);
2332   rtx_insn *jump = BB_END (bb);
2333   regset live, live_union;
2334 
2335   /* Nothing to do if there are not at least two outgoing edges.  */
2336   if (nedges < 2)
2337     return false;
2338 
2339   /* Don't crossjump if this block ends in a computed jump,
2340      unless we are optimizing for size.  */
2341   if (optimize_bb_for_size_p (bb)
2342       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2343       && computed_jump_p (BB_END (bb)))
2344     return false;
2345 
2346   cond = get_condition (jump, &move_before, true, false);
2347   if (cond == NULL_RTX)
2348     {
2349       if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2350 	move_before = prev_nonnote_nondebug_insn (jump);
2351       else
2352 	move_before = jump;
2353     }
2354 
2355   for (ix = 0; ix < nedges; ix++)
2356     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2357       return false;
2358 
2359   for (ix = 0; ix < nedges; ix++)
2360     {
2361       edge e = EDGE_SUCC (bb, ix);
2362       basic_block other_bb = e->dest;
2363 
2364       if (df_get_bb_dirty (other_bb))
2365 	{
2366 	  block_was_dirty = true;
2367 	  return false;
2368 	}
2369 
2370       if (e->flags & EDGE_ABNORMAL)
2371 	return false;
2372 
2373       /* Normally, all destination blocks must only be reachable from this
2374 	 block, i.e. they must have one incoming edge.
2375 
2376 	 There is one special case we can handle, that of multiple consecutive
2377 	 jumps where the first jumps to one of the targets of the second jump.
2378 	 This happens frequently in switch statements for default labels.
2379 	 The structure is as follows:
2380 	 FINAL_DEST_BB
2381 	 ....
2382 	 if (cond) jump A;
2383 	 fall through
2384 	 BB
2385 	 jump with targets A, B, C, D...
2386 	 A
2387 	 has two incoming edges, from FINAL_DEST_BB and BB
2388 
2389 	 In this case, we can try to move the insns through BB and into
2390 	 FINAL_DEST_BB.  */
2391       if (EDGE_COUNT (other_bb->preds) != 1)
2392 	{
2393 	  edge incoming_edge, incoming_bb_other_edge;
2394 	  edge_iterator ei;
2395 
2396 	  if (final_dest_bb != NULL
2397 	      || EDGE_COUNT (other_bb->preds) != 2)
2398 	    return false;
2399 
2400 	  /* We must be able to move the insns across the whole block.  */
2401 	  move_before = BB_HEAD (bb);
2402 	  while (!NONDEBUG_INSN_P (move_before))
2403 	    move_before = NEXT_INSN (move_before);
2404 
2405 	  if (EDGE_COUNT (bb->preds) != 1)
2406 	    return false;
2407 	  incoming_edge = EDGE_PRED (bb, 0);
2408 	  final_dest_bb = incoming_edge->src;
2409 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2410 	    return false;
2411 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2412 	    if (incoming_bb_other_edge != incoming_edge)
2413 	      break;
2414 	  if (incoming_bb_other_edge->dest != other_bb)
2415 	    return false;
2416 	}
2417     }
2418 
2419   e0 = EDGE_SUCC (bb, 0);
2420   e0_last_head = NULL;
2421   changed = false;
2422 
2423   for (ix = 1; ix < nedges; ix++)
2424     {
2425       edge e = EDGE_SUCC (bb, ix);
2426       rtx_insn *e0_last, *e_last;
2427       int nmatch;
2428 
2429       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2430 						 &e0_last, &e_last, 0);
2431       if (nmatch == 0)
2432 	return false;
2433 
2434       if (nmatch < max_match)
2435 	{
2436 	  max_match = nmatch;
2437 	  e0_last_head = e0_last;
2438 	}
2439     }
2440 
2441   /* If we matched an entire block, we probably have to avoid moving the
2442      last insn.  */
2443   if (max_match > 0
2444       && e0_last_head == BB_END (e0->dest)
2445       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2446 	  || control_flow_insn_p (e0_last_head)))
2447     {
2448       max_match--;
2449       if (max_match == 0)
2450 	return false;
2451       e0_last_head = prev_real_nondebug_insn (e0_last_head);
2452     }
2453 
2454   if (max_match == 0)
2455     return false;
2456 
2457   /* We must find a union of the live registers at each of the end points.  */
2458   live = BITMAP_ALLOC (NULL);
2459   live_union = BITMAP_ALLOC (NULL);
2460 
2461   currptr = XNEWVEC (rtx_insn *, nedges);
2462   headptr = XNEWVEC (rtx_insn *, nedges);
2463   nextptr = XNEWVEC (rtx_insn *, nedges);
2464 
2465   for (ix = 0; ix < nedges; ix++)
2466     {
2467       int j;
2468       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2469       rtx_insn *head = BB_HEAD (merge_bb);
2470 
2471       while (!NONDEBUG_INSN_P (head))
2472 	head = NEXT_INSN (head);
2473       headptr[ix] = head;
2474       currptr[ix] = head;
2475 
2476       /* Compute the end point and live information.  */
2477       for (j = 1; j < max_match; j++)
2478 	do
2479 	  head = NEXT_INSN (head);
2480 	while (!NONDEBUG_INSN_P (head));
2481       simulate_backwards_to_point (merge_bb, live, head);
2482       IOR_REG_SET (live_union, live);
2483     }
2484 
2485   /* If we're moving across two blocks, verify the validity of the
2486      first move, then adjust the target and let the loop below deal
2487      with the final move.  */
2488   if (final_dest_bb != NULL)
2489     {
2490       rtx_insn *move_upto;
2491 
2492       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2493 				       jump, e0->dest, live_union,
2494 				       NULL, &move_upto);
2495       if (!moveall)
2496 	{
2497 	  if (move_upto == NULL_RTX)
2498 	    goto out;
2499 
2500 	  while (e0_last_head != move_upto)
2501 	    {
2502 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2503 					      live_union);
2504 	      e0_last_head = PREV_INSN (e0_last_head);
2505 	    }
2506 	}
2507       if (e0_last_head == NULL_RTX)
2508 	goto out;
2509 
2510       jump = BB_END (final_dest_bb);
2511       cond = get_condition (jump, &move_before, true, false);
2512       if (cond == NULL_RTX)
2513 	{
2514 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2515 	    move_before = prev_nonnote_nondebug_insn (jump);
2516 	  else
2517 	    move_before = jump;
2518 	}
2519     }
2520 
2521   do
2522     {
2523       rtx_insn *move_upto;
2524       moveall = can_move_insns_across (currptr[0], e0_last_head,
2525 				       move_before, jump, e0->dest, live_union,
2526 				       NULL, &move_upto);
2527       if (!moveall && move_upto == NULL_RTX)
2528 	{
2529 	  if (jump == move_before)
2530 	    break;
2531 
2532 	  /* Try again, using a different insertion point.  */
2533 	  move_before = jump;
2534 
2535 	  /* Don't try moving before a cc0 user, as that may invalidate
2536 	     the cc0.  */
2537 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2538 	    break;
2539 
2540 	  continue;
2541 	}
2542 
2543       if (final_dest_bb && !moveall)
2544 	/* We haven't checked whether a partial move would be OK for the first
2545 	   move, so we have to fail this case.  */
2546 	break;
2547 
2548       changed = true;
2549       for (;;)
2550 	{
2551 	  if (currptr[0] == move_upto)
2552 	    break;
2553 	  for (ix = 0; ix < nedges; ix++)
2554 	    {
2555 	      rtx_insn *curr = currptr[ix];
2556 	      do
2557 		curr = NEXT_INSN (curr);
2558 	      while (!NONDEBUG_INSN_P (curr));
2559 	      currptr[ix] = curr;
2560 	    }
2561 	}
2562 
2563       /* If we can't currently move all of the identical insns, remember
2564 	 each insn after the range that we'll merge.  */
2565       if (!moveall)
2566 	for (ix = 0; ix < nedges; ix++)
2567 	  {
2568 	    rtx_insn *curr = currptr[ix];
2569 	    do
2570 	      curr = NEXT_INSN (curr);
2571 	    while (!NONDEBUG_INSN_P (curr));
2572 	    nextptr[ix] = curr;
2573 	  }
2574 
2575       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2576       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2577       if (final_dest_bb != NULL)
2578 	df_set_bb_dirty (final_dest_bb);
2579       df_set_bb_dirty (bb);
2580       for (ix = 1; ix < nedges; ix++)
2581 	{
2582 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2583 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2584 	}
2585       if (!moveall)
2586 	{
2587 	  if (jump == move_before)
2588 	    break;
2589 
2590 	  /* For the unmerged insns, try a different insertion point.  */
2591 	  move_before = jump;
2592 
2593 	  /* Don't try moving before a cc0 user, as that may invalidate
2594 	     the cc0.  */
2595 	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2596 	    break;
2597 
2598 	  for (ix = 0; ix < nedges; ix++)
2599 	    currptr[ix] = headptr[ix] = nextptr[ix];
2600 	}
2601     }
2602   while (!moveall);
2603 
2604  out:
2605   free (currptr);
2606   free (headptr);
2607   free (nextptr);
2608 
2609   crossjumps_occurred |= changed;
2610 
2611   return changed;
2612 }
2613 
2614 /* Return true if BB contains just a bb note, or a bb note followed
2615    only by DEBUG_INSNs.  */
2616 
2617 static bool
2618 trivially_empty_bb_p (basic_block bb)
2619 {
2620   rtx_insn *insn = BB_END (bb);
2621 
2622   while (1)
2623     {
2624       if (insn == BB_HEAD (bb))
2625 	return true;
2626       if (!DEBUG_INSN_P (insn))
2627 	return false;
2628       insn = PREV_INSN (insn);
2629     }
2630 }
2631 
2632 /* Return true if BB contains just a return and possibly a USE of the
2633    return value.  Fill in *RET and *USE with the return and use insns
2634    if any found, otherwise NULL.  All CLOBBERs are ignored.  */
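     /* A typical match is an epilogue block containing only a USE of the
        return value register followed by a (simple_)return.  */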
2635 
2636 static bool
2637 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2638 {
2639   *ret = *use = NULL;
2640   rtx_insn *insn;
2641 
2642   if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2643     return false;
2644 
2645   FOR_BB_INSNS (bb, insn)
2646     if (NONDEBUG_INSN_P (insn))
2647       {
2648 	rtx pat = PATTERN (insn);
2649 
2650 	if (!*ret && ANY_RETURN_P (pat))
2651 	  *ret = insn;
2652 	else if (!*ret && !*use && GET_CODE (pat) == USE
2653 	    && REG_P (XEXP (pat, 0))
2654 	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2655 	  *use = insn;
2656 	else if (GET_CODE (pat) != CLOBBER)
2657 	  return false;
2658       }
2659 
2660   return !!*ret;
2661 }
2662 
2663 /* Do simple CFG optimizations - basic block merging, simplification of jump
2664    instructions, etc.  Return nonzero if changes were made.  */
2665 
2666 static bool
2667 try_optimize_cfg (int mode)
2668 {
2669   bool changed_overall = false;
2670   bool changed;
2671   int iterations = 0;
2672   basic_block bb, b, next;
2673 
2674   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2675     clear_bb_flags ();
2676 
2677   crossjumps_occurred = false;
2678 
2679   FOR_EACH_BB_FN (bb, cfun)
2680     update_forwarder_flag (bb);
2681 
2682   if (! targetm.cannot_modify_jumps_p ())
2683     {
2684       first_pass = true;
2685       /* Attempt to merge blocks as made possible by edge removal.  If
2686 	 a block has only one successor, and the successor has only
2687 	 one predecessor, they may be combined.  */
2688       do
2689 	{
2690 	  block_was_dirty = false;
2691 	  changed = false;
2692 	  iterations++;
2693 
2694 	  if (dump_file)
2695 	    fprintf (dump_file,
2696 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2697 		     iterations);
2698 
2699 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2700 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2701 	    {
2702 	      basic_block c;
2703 	      edge s;
2704 	      bool changed_here = false;
2705 
2706 	      /* Delete trivially dead basic blocks.  These are either
2707 		 blocks with no predecessors, or empty blocks with no
2708 		 successors.  However, if the empty block with no
2709 		 successors is the successor of the ENTRY_BLOCK, it is
2710 		 kept.  This ensures that the ENTRY_BLOCK will have a
2711 		 successor which is a precondition for many RTL
2712 		 passes.  Empty blocks may result from expanding
2713 		 __builtin_unreachable ().  */
2714 	      if (EDGE_COUNT (b->preds) == 0
2715 		  || (EDGE_COUNT (b->succs) == 0
2716 		      && trivially_empty_bb_p (b)
2717 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2718 		      != b))
2719 		{
2720 		  c = b->prev_bb;
2721 		  if (EDGE_COUNT (b->preds) > 0)
2722 		    {
2723 		      edge e;
2724 		      edge_iterator ei;
2725 
2726 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2727 			{
2728 			  rtx_insn *insn;
2729 			  for (insn = BB_FOOTER (b);
2730 			       insn; insn = NEXT_INSN (insn))
2731 			    if (BARRIER_P (insn))
2732 			      break;
2733 			  if (insn)
2734 			    FOR_EACH_EDGE (e, ei, b->preds)
2735 			      if ((e->flags & EDGE_FALLTHRU))
2736 				{
2737 				  if (BB_FOOTER (b)
2738 				      && BB_FOOTER (e->src) == NULL)
2739 				    {
2740 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2741 				      BB_FOOTER (b) = NULL;
2742 				    }
2743 				  else
2744 				    emit_barrier_after_bb (e->src);
2745 				}
2746 			}
2747 		      else
2748 			{
2749 			  rtx_insn *last = get_last_bb_insn (b);
2750 			  if (last && BARRIER_P (last))
2751 			    FOR_EACH_EDGE (e, ei, b->preds)
2752 			      if ((e->flags & EDGE_FALLTHRU))
2753 				emit_barrier_after (BB_END (e->src));
2754 			}
2755 		    }
2756 		  delete_basic_block (b);
2757 		  changed = true;
2758 		  /* Avoid trying to remove the entry block.  */
2759 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2760 		  continue;
2761 		}
2762 
2763 	      /* Remove code labels no longer used.  */
2764 	      if (single_pred_p (b)
2765 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2766 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2767 		  && LABEL_P (BB_HEAD (b))
2768 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2769 		  /* If the previous block ends with a branch to this
2770 		     block, we can't delete the label.  Normally this
2771 		     is a condjump that is yet to be simplified, but
2772 		     if CASE_DROPS_THRU, this can be a tablejump with
2773 		     some element going to the same place as the
2774 		     default (fallthru).  */
2775 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2776 		      || !JUMP_P (BB_END (single_pred (b)))
2777 		      || ! label_is_jump_target_p (BB_HEAD (b),
2778 						   BB_END (single_pred (b)))))
2779 		{
2780 		  delete_insn (BB_HEAD (b));
2781 		  if (dump_file)
2782 		    fprintf (dump_file, "Deleted label in block %i.\n",
2783 			     b->index);
2784 		}
2785 
2786 	      /* If we fall through an empty block, we can remove it.  */
2787 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2788 		  && single_pred_p (b)
2789 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2790 		  && !LABEL_P (BB_HEAD (b))
2791 		  && FORWARDER_BLOCK_P (b)
2792 		  /* Note that forwarder_block_p true ensures that
2793 		     there is a successor for this block.  */
2794 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2795 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2796 		{
2797 		  if (dump_file)
2798 		    fprintf (dump_file,
2799 			     "Deleting fallthru block %i.\n",
2800 			     b->index);
2801 
2802 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2803 		       ? b->next_bb : b->prev_bb);
2804 		  redirect_edge_succ_nodup (single_pred_edge (b),
2805 					    single_succ (b));
2806 		  delete_basic_block (b);
2807 		  changed = true;
2808 		  b = c;
2809 		  continue;
2810 		}
2811 
2812 	      /* Merge B with its single successor, if any.  */
2813 	      if (single_succ_p (b)
2814 		  && (s = single_succ_edge (b))
2815 		  && !(s->flags & EDGE_COMPLEX)
2816 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2817 		  && single_pred_p (c)
2818 		  && b != c)
2819 		{
2820 		  /* When not in cfg_layout mode use code aware of reordering
2821 		     INSNs.  This code possibly creates new basic blocks, so it
2822 		     does not fit the merge_blocks interface and is kept here in
2823 		     the hope that it will become useless once more of the compiler
2824 		     is transformed to use cfg_layout mode.  */
2825 
2826 		  if ((mode & CLEANUP_CFGLAYOUT)
2827 		      && can_merge_blocks_p (b, c))
2828 		    {
2829 		      merge_blocks (b, c);
2830 		      update_forwarder_flag (b);
2831 		      changed_here = true;
2832 		    }
2833 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2834 			   /* If the jump insn has side effects,
2835 			      we can't kill the edge.  */
2836 			   && (!JUMP_P (BB_END (b))
2837 			       || (reload_completed
2838 				   ? simplejump_p (BB_END (b))
2839 				   : (onlyjump_p (BB_END (b))
2840 				      && !tablejump_p (BB_END (b),
2841 						       NULL, NULL))))
2842 			   && (next = merge_blocks_move (s, b, c, mode)))
2843 		      {
2844 			b = next;
2845 			changed_here = true;
2846 		      }
2847 		}
2848 
2849 	      /* Try to change a branch to a return to just that return.  */
2850 	      rtx_insn *ret, *use;
2851 	      if (single_succ_p (b)
2852 		  && onlyjump_p (BB_END (b))
2853 		  && bb_is_just_return (single_succ (b), &ret, &use))
2854 		{
2855 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2856 				     PATTERN (ret), 0))
2857 		    {
2858 		      if (use)
2859 			emit_insn_before (copy_insn (PATTERN (use)),
2860 					  BB_END (b));
2861 		      if (dump_file)
2862 			fprintf (dump_file, "Changed jump %d->%d to return.\n",
2863 				 b->index, single_succ (b)->index);
2864 		      redirect_edge_succ (single_succ_edge (b),
2865 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2866 		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
2867 		      changed_here = true;
2868 		    }
2869 		}
2870 
2871 	      /* Try to change a conditional branch to a return to the
2872 		 respective conditional return.  */
2873 	      if (EDGE_COUNT (b->succs) == 2
2874 		  && any_condjump_p (BB_END (b))
2875 		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
2876 		{
2877 		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2878 				     PATTERN (ret), 0))
2879 		    {
2880 		      if (use)
2881 			emit_insn_before (copy_insn (PATTERN (use)),
2882 					  BB_END (b));
2883 		      if (dump_file)
2884 			fprintf (dump_file, "Changed conditional jump %d->%d "
2885 				 "to conditional return.\n",
2886 				 b->index, BRANCH_EDGE (b)->dest->index);
2887 		      redirect_edge_succ (BRANCH_EDGE (b),
2888 					  EXIT_BLOCK_PTR_FOR_FN (cfun));
2889 		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2890 		      changed_here = true;
2891 		    }
2892 		}
2893 
2894 	      /* Try to flip a conditional branch that falls through to
2895 		 a return so that it becomes a conditional return and a
2896 		 new jump to the original branch target.  */
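                   /* E.g. "if (x) goto L;" falling through into a bare
                      return becomes "if (!x) return;", with the remaining
                      path reaching L through a newly inserted jump block.  */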
2897 	      if (EDGE_COUNT (b->succs) == 2
2898 		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2899 		  && any_condjump_p (BB_END (b))
2900 		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
2901 		{
2902 		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2903 				   JUMP_LABEL (BB_END (b)), 0))
2904 		    {
2905 		      basic_block new_ft = BRANCH_EDGE (b)->dest;
2906 		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2907 					 PATTERN (ret), 0))
2908 			{
2909 			  if (use)
2910 			    emit_insn_before (copy_insn (PATTERN (use)),
2911 					      BB_END (b));
2912 			  if (dump_file)
2913 			    fprintf (dump_file, "Changed conditional jump "
2914 				     "%d->%d to conditional return, adding "
2915 				     "fall-through jump.\n",
2916 				     b->index, BRANCH_EDGE (b)->dest->index);
2917 			  redirect_edge_succ (BRANCH_EDGE (b),
2918 					      EXIT_BLOCK_PTR_FOR_FN (cfun));
2919 			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2920 			  std::swap (BRANCH_EDGE (b)->probability,
2921 				     FALLTHRU_EDGE (b)->probability);
2922 			  update_br_prob_note (b);
2923 			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
2924 			  notice_new_block (jb);
2925 			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
2926 					      block_label (new_ft), 0))
2927 			    gcc_unreachable ();
2928 			  redirect_edge_succ (single_succ_edge (jb), new_ft);
2929 			  changed_here = true;
2930 			}
2931 		      else
2932 			{
2933 			  /* Invert the jump back to what it was.  This should
2934 			     never fail.  */
2935 			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2936 					    JUMP_LABEL (BB_END (b)), 0))
2937 			    gcc_unreachable ();
2938 			}
2939 		    }
2940 		}
2941 
2942 	      /* Simplify branch over branch.  */
2943 	      if ((mode & CLEANUP_EXPENSIVE)
2944 		   && !(mode & CLEANUP_CFGLAYOUT)
2945 		   && try_simplify_condjump (b))
2946 		changed_here = true;
2947 
2948 	      /* If B has a single outgoing edge, but uses a
2949 		 non-trivial jump instruction without side-effects, we
2950 		 can either delete the jump entirely, or replace it
2951 		 with a simple unconditional jump.  */
2952 	      if (single_succ_p (b)
2953 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2954 		  && onlyjump_p (BB_END (b))
2955 		  && !CROSSING_JUMP_P (BB_END (b))
2956 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2957 						     single_succ (b),
2958 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2959 		{
2960 		  update_forwarder_flag (b);
2961 		  changed_here = true;
2962 		}
2963 
2964 	      /* Simplify branch to branch.  */
2965 	      if (try_forward_edges (mode, b))
2966 		{
2967 		  update_forwarder_flag (b);
2968 		  changed_here = true;
2969 		}
2970 
2971 	      /* Look for shared code between blocks.  */
2972 	      if ((mode & CLEANUP_CROSSJUMP)
2973 		  && try_crossjump_bb (mode, b))
2974 		changed_here = true;
2975 
2976 	      if ((mode & CLEANUP_CROSSJUMP)
2977 		  /* This can lengthen register lifetimes.  Do it only after
2978 		     reload.  */
2979 		  && reload_completed
2980 		  && try_head_merge_bb (b))
2981 		changed_here = true;
2982 
2983 	      /* Don't get confused by the index shift caused by
2984 		 deleting blocks.  */
2985 	      if (!changed_here)
2986 		b = b->next_bb;
2987 	      else
2988 		changed = true;
2989 	    }
2990 
2991 	  if ((mode & CLEANUP_CROSSJUMP)
2992 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2993 	    changed = true;
2994 
2995 	  if (block_was_dirty)
2996 	    {
2997 	      /* This should only be set by head-merging.  */
2998 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2999 	      df_analyze ();
3000 	    }
3001 
3002 	  if (changed)
3003             {
3004               /* Edge forwarding in particular can cause hot blocks previously
3005                  reached by both hot and cold blocks to become dominated only
3006                  by cold blocks. This will cause the verification below to fail,
3007                  and lead to now cold code in the hot section. This is not easy
3008                  to detect and fix during edge forwarding, and in some cases
3009                  is only visible after newly unreachable blocks are deleted,
3010                  which will be done in fixup_partitions.  */
3011 	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
3012 		{
3013 		  fixup_partitions ();
3014 	          checking_verify_flow_info ();
3015 		}
3016             }
3017 
3018 	  changed_overall |= changed;
3019 	  first_pass = false;
3020 	}
3021       while (changed);
3022     }
3023 
3024   FOR_ALL_BB_FN (b, cfun)
3025     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
3026 
3027   return changed_overall;
3028 }
3029 
3030 /* Delete all unreachable basic blocks.  */
3031 
3032 bool
3033 delete_unreachable_blocks (void)
3034 {
3035   bool changed = false;
3036   basic_block b, prev_bb;
3037 
3038   find_unreachable_blocks ();
3039 
3040   /* When we're in GIMPLE mode and there may be debug bind insns, we
3041      should delete blocks in reverse dominator order, so as to get a
3042      chance to substitute all released DEFs into debug bind stmts.  If
3043      we don't have dominators information, walking blocks backward
3044      gets us a better chance of retaining most debug information than
3045      otherwise.  */
3046   if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
3047       && dom_info_available_p (CDI_DOMINATORS))
3048     {
3049       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3050 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3051 	{
3052 	  prev_bb = b->prev_bb;
3053 
3054 	  if (!(b->flags & BB_REACHABLE))
3055 	    {
3056 	      /* Speed up the removal of blocks that don't dominate
3057 		 others.  Walking backwards, this should be the common
3058 		 case.  */
3059 	      if (!first_dom_son (CDI_DOMINATORS, b))
3060 		delete_basic_block (b);
3061 	      else
3062 		{
3063 		  vec<basic_block> h
3064 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
3065 
3066 		  while (h.length ())
3067 		    {
3068 		      b = h.pop ();
3069 
3070 		      prev_bb = b->prev_bb;
3071 
3072 		      gcc_assert (!(b->flags & BB_REACHABLE));
3073 
3074 		      delete_basic_block (b);
3075 		    }
3076 
3077 		  h.release ();
3078 		}
3079 
3080 	      changed = true;
3081 	    }
3082 	}
3083     }
3084   else
3085     {
3086       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3087 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3088 	{
3089 	  prev_bb = b->prev_bb;
3090 
3091 	  if (!(b->flags & BB_REACHABLE))
3092 	    {
3093 	      delete_basic_block (b);
3094 	      changed = true;
3095 	    }
3096 	}
3097     }
3098 
3099   if (changed)
3100     tidy_fallthru_edges ();
3101   return changed;
3102 }
3103 
3104 /* Delete any jump tables never referenced.  We can't delete them at the
3105    time of removing the tablejump insn as they are referenced by the preceding
3106    insns computing the destination, so we delay deleting and garbage collect
3107    them once life information is computed.  */
3108 void
3109 delete_dead_jumptables (void)
3110 {
3111   basic_block bb;
3112 
3113   /* A dead jump table does not belong to any basic block.  Scan insns
3114      between two adjacent basic blocks.  */
3115   FOR_EACH_BB_FN (bb, cfun)
3116     {
3117       rtx_insn *insn, *next;
3118 
3119       for (insn = NEXT_INSN (BB_END (bb));
3120 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3121 	   insn = next)
3122 	{
3123 	  next = NEXT_INSN (insn);
3124 	  if (LABEL_P (insn)
3125 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3126 	      && JUMP_TABLE_DATA_P (next))
3127 	    {
3128 	      rtx_insn *label = insn, *jump = next;
3129 
3130 	      if (dump_file)
3131 		fprintf (dump_file, "Dead jumptable %i removed\n",
3132 			 INSN_UID (insn));
3133 
3134 	      next = NEXT_INSN (next);
3135 	      delete_insn (jump);
3136 	      delete_insn (label);
3137 	    }
3138 	}
3139     }
3140 }
3141 
3142 
3143 /* Tidy the CFG by deleting unreachable code and whatnot.  */
3144 
3145 bool
3146 cleanup_cfg (int mode)
3147 {
3148   bool changed = false;
3149 
3150   /* Set the cfglayout mode flag here.  We could update all the callers
3151      but that is just inconvenient, especially given that we eventually
3152      want to have cfglayout mode as the default.  */
3153   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3154     mode |= CLEANUP_CFGLAYOUT;
3155 
3156   timevar_push (TV_CLEANUP_CFG);
3157   if (delete_unreachable_blocks ())
3158     {
3159       changed = true;
3160       /* We've possibly created trivially dead code.  Clean it up right
3161 	 now to introduce more opportunities for try_optimize_cfg.  */
3162       if (!(mode & (CLEANUP_NO_INSN_DEL))
3163 	  && !reload_completed)
3164 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3165     }
3166 
3167   compact_blocks ();
3168 
3169   /* To tail-merge blocks ending in the same noreturn function (e.g.
3170      a call to abort) we have to insert fake edges to exit.  Do this
3171      here once.  The fake edges do not interfere with any other CFG
3172      cleanups.  */
3173   if (mode & CLEANUP_CROSSJUMP)
3174     add_noreturn_fake_exit_edges ();
3175 
3176   if (!dbg_cnt (cfg_cleanup))
3177     return changed;
3178 
3179   while (try_optimize_cfg (mode))
3180     {
3181       delete_unreachable_blocks (), changed = true;
3182       if (!(mode & CLEANUP_NO_INSN_DEL))
3183 	{
3184 	  /* Try to remove some trivially dead insns when doing an expensive
3185 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3186 	     reload (it only handles pseudos) and run_fast_dce is too costly
3187 	     to run in every iteration.
3188 
3189 	     For effective cross jumping, we really want to run a fast DCE to
3190 	     clean up any dead conditions, or they get in the way of performing
3191 	     useful tail merges.
3192 
3193 	     Other transformations in cleanup_cfg are not so sensitive to dead
3194 	     code, so delete_trivially_dead_insns or even doing nothing at all
3195 	     is good enough.  */
3196 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3197 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3198 	    break;
3199 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3200 	    run_fast_dce ();
3201 	}
3202       else
3203 	break;
3204     }
3205 
3206   if (mode & CLEANUP_CROSSJUMP)
3207     remove_fake_exit_edges ();
3208 
3209   /* Don't call delete_dead_jumptables in cfglayout mode, because
3210      that function assumes that jump tables are in the insns stream.
3211      But we also don't _have_ to delete dead jumptables in cfglayout
3212      mode because we shouldn't even be looking at things that are
3213      not in a basic block.  Dead jumptables are cleaned up when
3214      going out of cfglayout mode.  */
3215   if (!(mode & CLEANUP_CFGLAYOUT))
3216     delete_dead_jumptables ();
3217 
3218   /* ???  We probably do this way too often.  */
3219   if (current_loops
3220       && (changed
3221 	  || (mode & CLEANUP_CFG_CHANGED)))
3222     {
3223       timevar_push (TV_REPAIR_LOOPS);
3224       /* The above doesn't preserve dominance info if available.  */
3225       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3226       calculate_dominance_info (CDI_DOMINATORS);
3227       fix_loop_structure (NULL);
3228       free_dominance_info (CDI_DOMINATORS);
3229       timevar_pop (TV_REPAIR_LOOPS);
3230     }
3231 
3232   timevar_pop (TV_CLEANUP_CFG);
3233 
3234   return changed;
3235 }
3236 
3237 namespace {
3238 
3239 const pass_data pass_data_jump =
3240 {
3241   RTL_PASS, /* type */
3242   "jump", /* name */
3243   OPTGROUP_NONE, /* optinfo_flags */
3244   TV_JUMP, /* tv_id */
3245   0, /* properties_required */
3246   0, /* properties_provided */
3247   0, /* properties_destroyed */
3248   0, /* todo_flags_start */
3249   0, /* todo_flags_finish */
3250 };
3251 
3252 class pass_jump : public rtl_opt_pass
3253 {
3254 public:
3255   pass_jump (gcc::context *ctxt)
3256     : rtl_opt_pass (pass_data_jump, ctxt)
3257   {}
3258 
3259   /* opt_pass methods: */
3260   virtual unsigned int execute (function *);
3261 
3262 }; // class pass_jump
3263 
3264 unsigned int
3265 pass_jump::execute (function *)
3266 {
3267   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3268   if (dump_file)
3269     dump_flow_info (dump_file, dump_flags);
3270   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3271 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3272   return 0;
3273 }
3274 
3275 } // anon namespace
3276 
3277 rtl_opt_pass *
3278 make_pass_jump (gcc::context *ctxt)
3279 {
3280   return new pass_jump (ctxt);
3281 }
3282 
3283 namespace {
3284 
3285 const pass_data pass_data_jump_after_combine =
3286 {
3287   RTL_PASS, /* type */
3288   "jump_after_combine", /* name */
3289   OPTGROUP_NONE, /* optinfo_flags */
3290   TV_JUMP, /* tv_id */
3291   0, /* properties_required */
3292   0, /* properties_provided */
3293   0, /* properties_destroyed */
3294   0, /* todo_flags_start */
3295   0, /* todo_flags_finish */
3296 };
3297 
3298 class pass_jump_after_combine : public rtl_opt_pass
3299 {
3300 public:
3301   pass_jump_after_combine (gcc::context *ctxt)
3302     : rtl_opt_pass (pass_data_jump_after_combine, ctxt)
3303   {}
3304 
3305   /* opt_pass methods: */
3306   virtual bool gate (function *) { return flag_thread_jumps; }
3307   virtual unsigned int execute (function *);
3308 
3309 }; // class pass_jump_after_combine
3310 
3311 unsigned int
3312 pass_jump_after_combine::execute (function *)
3313 {
3314   /* Jump threading does not keep dominators up-to-date.  */
3315   free_dominance_info (CDI_DOMINATORS);
3316   cleanup_cfg (CLEANUP_THREADING);
3317   return 0;
3318 }
3319 
3320 } // anon namespace
3321 
3322 rtl_opt_pass *
3323 make_pass_jump_after_combine (gcc::context *ctxt)
3324 {
3325   return new pass_jump_after_combine (ctxt);
3326 }
3327 
3328 namespace {
3329 
3330 const pass_data pass_data_jump2 =
3331 {
3332   RTL_PASS, /* type */
3333   "jump2", /* name */
3334   OPTGROUP_NONE, /* optinfo_flags */
3335   TV_JUMP, /* tv_id */
3336   0, /* properties_required */
3337   0, /* properties_provided */
3338   0, /* properties_destroyed */
3339   0, /* todo_flags_start */
3340   0, /* todo_flags_finish */
3341 };
3342 
3343 class pass_jump2 : public rtl_opt_pass
3344 {
3345 public:
3346   pass_jump2 (gcc::context *ctxt)
3347     : rtl_opt_pass (pass_data_jump2, ctxt)
3348   {}
3349 
3350   /* opt_pass methods: */
3351   virtual unsigned int execute (function *)
3352     {
3353       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3354       return 0;
3355     }
3356 
3357 }; // class pass_jump2
3358 
3359 } // anon namespace
3360 
3361 rtl_opt_pass *
3362 make_pass_jump2 (gcc::context *ctxt)
3363 {
3364   return new pass_jump2 (ctxt);
3365 }
3366