xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/cfgcleanup.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Control flow optimization code for GNU compiler.
2    Copyright (C) 1987-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22 
23    - Unreachable block removal
24    - Edge forwarding (an edge to a forwarder block is forwarded to its
25      successor.  Simplification of the branch instruction is performed by
26      the underlying infrastructure, so the branch can be converted to a
27      simplejump or eliminated).
28    - Cross jumping (tail merging)
29    - Conditional jump-around-simplejump simplification
30    - Basic block merging.  */
31 
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "tm.h"
36 #include "rtl.h"
37 #include "hash-set.h"
38 #include "machmode.h"
39 #include "vec.h"
40 #include "double-int.h"
41 #include "input.h"
42 #include "alias.h"
43 #include "symtab.h"
44 #include "wide-int.h"
45 #include "inchash.h"
46 #include "tree.h"
47 #include "hard-reg-set.h"
48 #include "regs.h"
49 #include "insn-config.h"
50 #include "flags.h"
51 #include "recog.h"
52 #include "diagnostic-core.h"
53 #include "cselib.h"
54 #include "params.h"
55 #include "tm_p.h"
56 #include "target.h"
57 #include "hashtab.h"
58 #include "function.h" /* For inline functions in emit-rtl.h they need crtl.  */
59 #include "emit-rtl.h"
60 #include "tree-pass.h"
61 #include "cfgloop.h"
62 #include "function.h"
63 #include "statistics.h"
64 #include "real.h"
65 #include "fixed-value.h"
66 #include "expmed.h"
67 #include "dojump.h"
68 #include "explow.h"
69 #include "calls.h"
70 #include "varasm.h"
71 #include "stmt.h"
72 #include "expr.h"
73 #include "dominance.h"
74 #include "cfg.h"
75 #include "cfgrtl.h"
76 #include "cfganal.h"
77 #include "cfgbuild.h"
78 #include "cfgcleanup.h"
79 #include "predict.h"
80 #include "basic-block.h"
81 #include "df.h"
82 #include "dce.h"
83 #include "dbgcnt.h"
84 #include "rtl-iter.h"
85 
86 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
87 
88 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
89 static bool first_pass;
90 
91 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
92 static bool crossjumps_occured;
93 
94 /* Set to true if we couldn't run an optimization due to stale liveness
95    information; we should run df_analyze to enable more opportunities.  */
96 static bool block_was_dirty;
97 
98 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
99 static bool try_crossjump_bb (int, basic_block);
100 static bool outgoing_edges_match (int, basic_block, basic_block);
101 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
102 
103 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
104 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
105 static bool try_optimize_cfg (int);
106 static bool try_simplify_condjump (basic_block);
107 static bool try_forward_edges (int, basic_block);
108 static edge thread_jump (edge, basic_block);
109 static bool mark_effect (rtx, bitmap);
110 static void notice_new_block (basic_block);
111 static void update_forwarder_flag (basic_block);
112 static void merge_memattrs (rtx, rtx);
113 
114 /* Set flags for newly created block.  */
115 
116 static void
117 notice_new_block (basic_block bb)
118 {
119   if (!bb)
120     return;
121 
122   if (forwarder_block_p (bb))
123     bb->flags |= BB_FORWARDER_BLOCK;
124 }
125 
126 /* Recompute forwarder flag after block has been modified.  */
127 
128 static void
129 update_forwarder_flag (basic_block bb)
130 {
131   if (forwarder_block_p (bb))
132     bb->flags |= BB_FORWARDER_BLOCK;
133   else
134     bb->flags &= ~BB_FORWARDER_BLOCK;
135 }
136 
137 /* Simplify a conditional jump around an unconditional jump.
138    Return true if something changed.  */
139 
140 static bool
141 try_simplify_condjump (basic_block cbranch_block)
142 {
143   basic_block jump_block, jump_dest_block, cbranch_dest_block;
144   edge cbranch_jump_edge, cbranch_fallthru_edge;
145   rtx_insn *cbranch_insn;
146 
147   /* Verify that there are exactly two successors.  */
148   if (EDGE_COUNT (cbranch_block->succs) != 2)
149     return false;
150 
151   /* Verify that we've got a normal conditional branch at the end
152      of the block.  */
153   cbranch_insn = BB_END (cbranch_block);
154   if (!any_condjump_p (cbranch_insn))
155     return false;
156 
157   cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
158   cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
159 
160   /* The next block must not have multiple predecessors, must not
161      be the last block in the function, and must contain just the
162      unconditional jump.  */
163   jump_block = cbranch_fallthru_edge->dest;
164   if (!single_pred_p (jump_block)
165       || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
166       || !FORWARDER_BLOCK_P (jump_block))
167     return false;
168   jump_dest_block = single_succ (jump_block);
169 
170   /* If we are partitioning hot/cold basic blocks, we don't want to
171      mess up unconditional or indirect jumps that cross between hot
172      and cold sections.
173 
174      Basic block partitioning may result in some jumps that appear to
175      be optimizable (or blocks that appear to be mergeable), but which really
176      must be left untouched (they are required to make it safely across
177      partition boundaries).  See the comments at the top of
178      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
179 
180   if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
181       || (cbranch_jump_edge->flags & EDGE_CROSSING))
182     return false;
183 
184   /* The conditional branch must target the block after the
185      unconditional branch.  */
186   cbranch_dest_block = cbranch_jump_edge->dest;
187 
188   if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
189       || !can_fallthru (jump_block, cbranch_dest_block))
190     return false;
191 
192   /* Invert the conditional branch.  */
193   if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
194     return false;
195 
196   if (dump_file)
197     fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
198 	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
199 
200   /* Success.  Update the CFG to match.  Note that after this point
201      the edge variable names appear backwards; the redirection is done
202      this way to preserve edge profile data.  */
203   cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
204 						cbranch_dest_block);
205   cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
206 						    jump_dest_block);
207   cbranch_jump_edge->flags |= EDGE_FALLTHRU;
208   cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
209   update_br_prob_note (cbranch_block);
210 
211   /* Delete the block with the unconditional jump, and clean up the mess.  */
212   delete_basic_block (jump_block);
213   tidy_fallthru_edge (cbranch_jump_edge);
214   update_forwarder_flag (cbranch_block);
215 
216   return true;
217 }
218 
219 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
220    effect on the register.  Used by jump threading.  */
221 
222 static bool
223 mark_effect (rtx exp, regset nonequal)
224 {
225   int regno;
226   rtx dest;
227   switch (GET_CODE (exp))
228     {
229       /* In case we do clobber the register, mark it as equal, as we know the
230 	 value is dead so it doesn't have to match.  */
231     case CLOBBER:
232       if (REG_P (XEXP (exp, 0)))
233 	{
234 	  dest = XEXP (exp, 0);
235 	  regno = REGNO (dest);
236 	  if (HARD_REGISTER_NUM_P (regno))
237 	    bitmap_clear_range (nonequal, regno,
238 				hard_regno_nregs[regno][GET_MODE (dest)]);
239 	  else
240 	    bitmap_clear_bit (nonequal, regno);
241 	}
242       return false;
243 
244     case SET:
245       if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
246 	return false;
247       dest = SET_DEST (exp);
248       if (dest == pc_rtx)
249 	return false;
250       if (!REG_P (dest))
251 	return true;
252       regno = REGNO (dest);
253       if (HARD_REGISTER_NUM_P (regno))
254 	bitmap_set_range (nonequal, regno,
255 			  hard_regno_nregs[regno][GET_MODE (dest)]);
256       else
257 	bitmap_set_bit (nonequal, regno);
258       return false;
259 
260     default:
261       return false;
262     }
263 }
264 
265 /* Return true if X contains a register in NONEQUAL.  */
266 static bool
267 mentions_nonequal_regs (const_rtx x, regset nonequal)
268 {
269   subrtx_iterator::array_type array;
270   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
271     {
272       const_rtx x = *iter;
273       if (REG_P (x))
274 	{
275 	  unsigned int regno = REGNO (x);
276 	  if (REGNO_REG_SET_P (nonequal, regno))
277 	    return true;
278 	  if (regno < FIRST_PSEUDO_REGISTER)
279 	    {
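	      /* A multi-word hard register also occupies the following
		 hard registers; check each of those words too.  */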
280 	      int n = hard_regno_nregs[regno][GET_MODE (x)];
281 	      while (--n > 0)
282 		if (REGNO_REG_SET_P (nonequal, regno + n))
283 		  return true;
284 	    }
285 	}
286     }
287   return false;
288 }
289 
290 /* Attempt to prove that the basic block B will have no side effects and
291    will always continue along the same edge if reached via E.  Return that
292    edge if it exists, NULL otherwise.  */
293 
294 static edge
295 thread_jump (edge e, basic_block b)
296 {
297   rtx set1, set2, cond1, cond2;
298   rtx_insn *insn;
299   enum rtx_code code1, code2, reversed_code2;
300   bool reverse1 = false;
301   unsigned i;
302   regset nonequal;
303   bool failed = false;
304   reg_set_iterator rsi;
305 
306   if (b->flags & BB_NONTHREADABLE_BLOCK)
307     return NULL;
308 
309   /* At the moment, we handle only conditional jumps, but later we may
310      want to extend this code to tablejumps and others.  */
311   if (EDGE_COUNT (e->src->succs) != 2)
312     return NULL;
313   if (EDGE_COUNT (b->succs) != 2)
314     {
315       b->flags |= BB_NONTHREADABLE_BLOCK;
316       return NULL;
317     }
318 
319   /* The second branch must end with an onlyjump, as we will eliminate the jump.  */
320   if (!any_condjump_p (BB_END (e->src)))
321     return NULL;
322 
323   if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
324     {
325       b->flags |= BB_NONTHREADABLE_BLOCK;
326       return NULL;
327     }
328 
329   set1 = pc_set (BB_END (e->src));
330   set2 = pc_set (BB_END (b));
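  /* Set reverse1 when traversing edge E means the condition of the first
     jump is false, so that CODE1 below describes the comparison that
     actually holds along E.  */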
331   if (((e->flags & EDGE_FALLTHRU) != 0)
332       != (XEXP (SET_SRC (set1), 1) == pc_rtx))
333     reverse1 = true;
334 
335   cond1 = XEXP (SET_SRC (set1), 0);
336   cond2 = XEXP (SET_SRC (set2), 0);
337   if (reverse1)
338     code1 = reversed_comparison_code (cond1, BB_END (e->src));
339   else
340     code1 = GET_CODE (cond1);
341 
342   code2 = GET_CODE (cond2);
343   reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
344 
345   if (!comparison_dominates_p (code1, code2)
346       && !comparison_dominates_p (code1, reversed_code2))
347     return NULL;
348 
349   /* Ensure that the comparison operators are equivalent.
350      ??? This is far too pessimistic.  We should allow swapped operands,
351      different CCmodes, or for example comparisons for interval, that
352      dominate even when operands are not equivalent.  */
353   if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
354       || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
355     return NULL;
356 
357   /* Short circuit cases where block B contains some side effects, as we can't
358      safely bypass it.  */
359   for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
360        insn = NEXT_INSN (insn))
361     if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
362       {
363 	b->flags |= BB_NONTHREADABLE_BLOCK;
364 	return NULL;
365       }
366 
367   cselib_init (0);
368 
369   /* First process all values computed in the source basic block.  */
370   for (insn = NEXT_INSN (BB_HEAD (e->src));
371        insn != NEXT_INSN (BB_END (e->src));
372        insn = NEXT_INSN (insn))
373     if (INSN_P (insn))
374       cselib_process_insn (insn);
375 
376   nonequal = BITMAP_ALLOC (NULL);
377   CLEAR_REG_SET (nonequal);
378 
379   /* Now assume that we've continued via the edge E to B and continue
380      processing as if it were the same basic block.
381      Our goal is to prove that the whole block is a NOOP.  */
382 
383   for (insn = NEXT_INSN (BB_HEAD (b));
384        insn != NEXT_INSN (BB_END (b)) && !failed;
385        insn = NEXT_INSN (insn))
386     {
387       if (INSN_P (insn))
388 	{
389 	  rtx pat = PATTERN (insn);
390 
391 	  if (GET_CODE (pat) == PARALLEL)
392 	    {
393 	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
394 		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
395 	    }
396 	  else
397 	    failed |= mark_effect (pat, nonequal);
398 	}
399 
400       cselib_process_insn (insn);
401     }
402 
403   /* Later we should clear nonequal of dead registers.  So far we don't
404      have liveness information in cfg_cleanup.  */
405   if (failed)
406     {
407       b->flags |= BB_NONTHREADABLE_BLOCK;
408       goto failed_exit;
409     }
410 
411   /* cond2 must not mention any register whose value is not known to be
412      the same as it was in the former block.  */
413   if (mentions_nonequal_regs (cond2, nonequal))
414     goto failed_exit;
415 
416   EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
417     goto failed_exit;
418 
419   BITMAP_FREE (nonequal);
420   cselib_finish ();
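  /* Block B is a no-op along E and the outcome of its condition is known:
     it holds iff CODE1 dominates CODE2.  Pick the outgoing edge of B that
     is taken in that case, accounting for which arm of the IF_THEN_ELSE
     holds the label.  */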
421   if ((comparison_dominates_p (code1, code2) != 0)
422       != (XEXP (SET_SRC (set2), 1) == pc_rtx))
423     return BRANCH_EDGE (b);
424   else
425     return FALLTHRU_EDGE (b);
426 
427 failed_exit:
428   BITMAP_FREE (nonequal);
429   cselib_finish ();
430   return NULL;
431 }
432 
433 /* Attempt to forward edges leaving basic block B.
434    Return true if successful.  */
435 
436 static bool
437 try_forward_edges (int mode, basic_block b)
438 {
439   bool changed = false;
440   edge_iterator ei;
441   edge e, *threaded_edges = NULL;
442 
443   /* If we are partitioning hot/cold basic blocks, we don't want to
444      mess up unconditional or indirect jumps that cross between hot
445      and cold sections.
446 
447      Basic block partitioning may result in some jumps that appear to
448      be optimizable (or blocks that appear to be mergeable), but which really
449      must be left untouched (they are required to make it safely across
450      partition boundaries).  See the comments at the top of
451      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
452 
453   if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
454     return false;
455 
456   for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
457     {
458       basic_block target, first;
459       location_t goto_locus;
460       int counter;
461       bool threaded = false;
462       int nthreaded_edges = 0;
463       bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
464 
465       /* Skip complex edges because we don't know how to update them.
466 
467 	 Still handle fallthru edges, as we may succeed in forwarding the
468 	 fallthru edge to the same place as the branch edge of a conditional
469 	 branch and turn the conditional branch into an unconditional branch.  */
470       if (e->flags & EDGE_COMPLEX)
471 	{
472 	  ei_next (&ei);
473 	  continue;
474 	}
475 
476       target = first = e->dest;
477       counter = NUM_FIXED_BLOCKS;
478       goto_locus = e->goto_locus;
479 
480       /* If we are partitioning hot/cold basic_blocks, we don't want to mess
481 	 up jumps that cross between hot/cold sections.
482 
483 	 Basic block partitioning may result in some jumps that appear
484 	 to be optimizable (or blocks that appear to be mergeable), but which
485 	 really must be left untouched (they are required to make it safely
486 	 across partition boundaries).  See the comments at the top of
487 	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
488 	 details.  */
489 
490       if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
491 	  && JUMP_P (BB_END (first))
492 	  && CROSSING_JUMP_P (BB_END (first)))
493 	return changed;
494 
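      /* Follow the chain of forwarder blocks (and, when threading, of
	 threadable blocks), using COUNTER as a guard against cycles: the
	 chain cannot be longer than the number of basic blocks.  */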
495       while (counter < n_basic_blocks_for_fn (cfun))
496 	{
497 	  basic_block new_target = NULL;
498 	  bool new_target_threaded = false;
499 	  may_thread |= (target->flags & BB_MODIFIED) != 0;
500 
501 	  if (FORWARDER_BLOCK_P (target)
502 	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
503 	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
504 	    {
505 	      /* Bypass trivial infinite loops.  */
506 	      new_target = single_succ (target);
507 	      if (target == new_target)
508 		counter = n_basic_blocks_for_fn (cfun);
509 	      else if (!optimize)
510 		{
511 		  /* When not optimizing, ensure that edges or forwarder
512 		     blocks with different locus are not optimized out.  */
513 		  location_t new_locus = single_succ_edge (target)->goto_locus;
514 		  location_t locus = goto_locus;
515 
516 		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
517 		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
518 		      && new_locus != locus)
519 		    new_target = NULL;
520 		  else
521 		    {
522 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
523 			locus = new_locus;
524 
525 		      rtx_insn *last = BB_END (target);
526 		      if (DEBUG_INSN_P (last))
527 			last = prev_nondebug_insn (last);
528 		      if (last && INSN_P (last))
529 			new_locus = INSN_LOCATION (last);
530 		      else
531 			new_locus = UNKNOWN_LOCATION;
532 
533 		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
534 			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
535 			  && new_locus != locus)
536 			new_target = NULL;
537 		      else
538 			{
539 			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
540 			    locus = new_locus;
541 
542 			  goto_locus = locus;
543 			}
544 		    }
545 		}
546 	    }
547 
548 	  /* Allow threading over only one edge at a time to simplify updating
549 	     of probabilities.  */
550 	  else if ((mode & CLEANUP_THREADING) && may_thread)
551 	    {
552 	      edge t = thread_jump (e, target);
553 	      if (t)
554 		{
555 		  if (!threaded_edges)
556 		    threaded_edges = XNEWVEC (edge,
557 					      n_basic_blocks_for_fn (cfun));
558 		  else
559 		    {
560 		      int i;
561 
562 		      /* Detect an infinite loop across blocks not
563 			 including the start block.  */
564 		      for (i = 0; i < nthreaded_edges; ++i)
565 			if (threaded_edges[i] == t)
566 			  break;
567 		      if (i < nthreaded_edges)
568 			{
569 			  counter = n_basic_blocks_for_fn (cfun);
570 			  break;
571 			}
572 		    }
573 
574 		  /* Detect an infinite loop across the start block.  */
575 		  if (t->dest == b)
576 		    break;
577 
578 		  gcc_assert (nthreaded_edges
579 			      < (n_basic_blocks_for_fn (cfun)
580 				 - NUM_FIXED_BLOCKS));
581 		  threaded_edges[nthreaded_edges++] = t;
582 
583 		  new_target = t->dest;
584 		  new_target_threaded = true;
585 		}
586 	    }
587 
588 	  if (!new_target)
589 	    break;
590 
591 	  counter++;
592 	  target = new_target;
593 	  threaded |= new_target_threaded;
594 	}
595 
596       if (counter >= n_basic_blocks_for_fn (cfun))
597 	{
598 	  if (dump_file)
599 	    fprintf (dump_file, "Infinite loop in BB %i.\n",
600 		     target->index);
601 	}
602       else if (target == first)
603 	; /* We didn't do anything.  */
604       else
605 	{
606 	  /* Save the values now, as the edge may get removed.  */
607 	  gcov_type edge_count = e->count;
608 	  int edge_probability = e->probability;
609 	  int edge_frequency;
610 	  int n = 0;
611 
612 	  e->goto_locus = goto_locus;
613 
614 	  /* Don't force if target is exit block.  */
615 	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
616 	    {
617 	      notice_new_block (redirect_edge_and_branch_force (e, target));
618 	      if (dump_file)
619 		fprintf (dump_file, "Conditionals threaded.\n");
620 	    }
621 	  else if (!redirect_edge_and_branch (e, target))
622 	    {
623 	      if (dump_file)
624 		fprintf (dump_file,
625 			 "Forwarding edge %i->%i to %i failed.\n",
626 			 b->index, e->dest->index, target->index);
627 	      ei_next (&ei);
628 	      continue;
629 	    }
630 
631 	  /* We successfully forwarded the edge.  Now update profile
632 	     data: for each edge we traversed in the chain, remove
633 	     the original edge's execution count.  */
634 	  edge_frequency = apply_probability (b->frequency, edge_probability);
635 
636 	  do
637 	    {
638 	      edge t;
639 
640 	      if (!single_succ_p (first))
641 		{
642 		  gcc_assert (n < nthreaded_edges);
643 		  t = threaded_edges [n++];
644 		  gcc_assert (t->src == first);
645 		  update_bb_profile_for_threading (first, edge_frequency,
646 						   edge_count, t);
647 		  update_br_prob_note (first);
648 		}
649 	      else
650 		{
651 		  first->count -= edge_count;
652 		  if (first->count < 0)
653 		    first->count = 0;
654 		  first->frequency -= edge_frequency;
655 		  if (first->frequency < 0)
656 		    first->frequency = 0;
657 		  /* It is possible that as a result of
658 		     threading we've removed the edge because it
659 		     was threaded to the fallthru edge.  Avoid
660 		     getting out of sync.  */
661 		  if (n < nthreaded_edges
662 		      && first == threaded_edges [n]->src)
663 		    n++;
664 		  t = single_succ_edge (first);
665 		}
666 
667 	      t->count -= edge_count;
668 	      if (t->count < 0)
669 		t->count = 0;
670 	      first = t->dest;
671 	    }
672 	  while (first != target);
673 
674 	  changed = true;
675 	  continue;
676 	}
677       ei_next (&ei);
678     }
679 
680   free (threaded_edges);
681   return changed;
682 }
683 
684 
685 /* Blocks A and B are to be merged into a single block.  A has no incoming
686    fallthru edge, so it can be moved before B without adding or modifying
687    any jumps (aside from the jump from A to B).  */
688 
689 static void
690 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
691 {
692   rtx_insn *barrier;
693 
694   /* If we are partitioning hot/cold basic blocks, we don't want to
695      mess up unconditional or indirect jumps that cross between hot
696      and cold sections.
697 
698      Basic block partitioning may result in some jumps that appear to
699      be optimizable (or blocks that appear to be mergeable), but which really
700      must be left untouched (they are required to make it safely across
701      partition boundaries).  See the comments at the top of
702      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
703 
704   if (BB_PARTITION (a) != BB_PARTITION (b))
705     return;
706 
707   barrier = next_nonnote_insn (BB_END (a));
708   gcc_assert (BARRIER_P (barrier));
709   delete_insn (barrier);
710 
711   /* Scramble the insn chain.  */
712   if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
713     reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
714   df_set_bb_dirty (a);
715 
716   if (dump_file)
717     fprintf (dump_file, "Moved block %d before %d and merged.\n",
718 	     a->index, b->index);
719 
720   /* Swap the records for the two blocks around.  */
721 
722   unlink_block (a);
723   link_block (a, b->prev_bb);
724 
725   /* Now blocks A and B are contiguous.  Merge them.  */
726   merge_blocks (a, b);
727 }
728 
729 /* Blocks A and B are to be merged into a single block.  B has no outgoing
730    fallthru edge, so it can be moved after A without adding or modifying
731    any jumps (aside from the jump from A to B).  */
732 
733 static void
734 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
735 {
736   rtx_insn *barrier, *real_b_end;
737   rtx label;
738   rtx_jump_table_data *table;
739 
740   /* If we are partitioning hot/cold basic blocks, we don't want to
741      mess up unconditional or indirect jumps that cross between hot
742      and cold sections.
743 
744      Basic block partitioning may result in some jumps that appear to
745      be optimizable (or blocks that appear to be mergeable), but which really
746      must be left untouched (they are required to make it safely across
747      partition boundaries).  See the comments at the top of
748      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
749 
750   if (BB_PARTITION (a) != BB_PARTITION (b))
751     return;
752 
753   real_b_end = BB_END (b);
754 
755   /* If there is a jump table following block B, temporarily add the jump table
756      to block B so that it will also be moved to the correct location.  */
757   if (tablejump_p (BB_END (b), &label, &table)
758       && prev_active_insn (label) == BB_END (b))
759     {
760       BB_END (b) = table;
761     }
762 
763   /* There had better have been a barrier there.  Delete it.  */
764   barrier = NEXT_INSN (BB_END (b));
765   if (barrier && BARRIER_P (barrier))
766     delete_insn (barrier);
767 
768 
769   /* Scramble the insn chain.  */
770   reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
771 
772   /* Restore the real end of b.  */
773   BB_END (b) = real_b_end;
774 
775   if (dump_file)
776     fprintf (dump_file, "Moved block %d after %d and merged.\n",
777 	     b->index, a->index);
778 
779   /* Now blocks A and B are contiguous.  Merge them.  */
780   merge_blocks (a, b);
781 }
782 
783 /* Attempt to merge basic blocks that are potentially non-adjacent.
784    Return NULL iff the attempt failed, otherwise return the basic block
785    where cleanup_cfg should continue.  Because the merging commonly
786    moves a basic block away or introduces another optimization
787    possibility, return the basic block just before B so cleanup_cfg
788    doesn't need to iterate.
789 
790    It may be a good idea to return the basic block before C in case
791    C has been moved after B and originally appeared earlier in the
792    insn sequence, but we have no information available about the
793    relative ordering of these two.  Hopefully it is not too common.  */
794 
795 static basic_block
796 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
797 {
798   basic_block next;
799 
800   /* If we are partitioning hot/cold basic blocks, we don't want to
801      mess up unconditional or indirect jumps that cross between hot
802      and cold sections.
803 
804      Basic block partitioning may result in some jumps that appear to
805      be optimizable (or blocks that appear to be mergeable), but which really
806      must be left untouched (they are required to make it safely across
807      partition boundaries).  See the comments at the top of
808      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
809 
810   if (BB_PARTITION (b) != BB_PARTITION (c))
811     return NULL;
812 
813   /* If B has a fallthru edge to C, no need to move anything.  */
814   if (e->flags & EDGE_FALLTHRU)
815     {
816       int b_index = b->index, c_index = c->index;
817 
818       /* Protect the loop latches.  */
819       if (current_loops && c->loop_father->latch == c)
820 	return NULL;
821 
822       merge_blocks (b, c);
823       update_forwarder_flag (b);
824 
825       if (dump_file)
826 	fprintf (dump_file, "Merged %d and %d without moving.\n",
827 		 b_index, c_index);
828 
829       return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
830     }
831 
832   /* Otherwise we will need to move code around.  Do that only if expensive
833      transformations are allowed.  */
834   else if (mode & CLEANUP_EXPENSIVE)
835     {
836       edge tmp_edge, b_fallthru_edge;
837       bool c_has_outgoing_fallthru;
838       bool b_has_incoming_fallthru;
839 
840       /* Avoid overactive code motion, as the forwarder blocks should be
841 	 eliminated by edge redirection instead.  One exception might be the
842 	 case where B is a forwarder block and C has no fallthru edge, but
843 	 that should be cleaned up by bb-reorder instead.  */
844       if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
845 	return NULL;
846 
847       /* We must make sure not to munge the nesting of lexical blocks
848 	 and loop notes.  This is done by squeezing out all the notes
849 	 and leaving them there to lie.  Not ideal, but functional.  */
850 
851       tmp_edge = find_fallthru_edge (c->succs);
852       c_has_outgoing_fallthru = (tmp_edge != NULL);
853 
854       tmp_edge = find_fallthru_edge (b->preds);
855       b_has_incoming_fallthru = (tmp_edge != NULL);
856       b_fallthru_edge = tmp_edge;
857       next = b->prev_bb;
858       if (next == c)
859 	next = next->prev_bb;
860 
861       /* Otherwise, we're going to try to move C after B.  If C does
862 	 not have an outgoing fallthru, then it can be moved
863 	 immediately after B without introducing or modifying jumps.  */
864       if (! c_has_outgoing_fallthru)
865 	{
866 	  merge_blocks_move_successor_nojumps (b, c);
867 	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
868 	}
869 
870       /* If B does not have an incoming fallthru, then it can be moved
871 	 immediately before C without introducing or modifying jumps.
872 	 C cannot be the first block, so we do not have to worry about
873 	 accessing a non-existent block.  */
874 
875       if (b_has_incoming_fallthru)
876 	{
877 	  basic_block bb;
878 
879 	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
880 	    return NULL;
881 	  bb = force_nonfallthru (b_fallthru_edge);
882 	  if (bb)
883 	    notice_new_block (bb);
884 	}
885 
886       merge_blocks_move_predecessor_nojumps (b, c);
887       return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
888     }
889 
890   return NULL;
891 }
892 
893 
894 /* Remove the memory attributes of a MEM expression
895    if they are not equal.  */
896 
897 static void
898 merge_memattrs (rtx x, rtx y)
899 {
900   int i;
901   int j;
902   enum rtx_code code;
903   const char *fmt;
904 
905   if (x == y)
906     return;
907   if (x == 0 || y == 0)
908     return;
909 
910   code = GET_CODE (x);
911 
912   if (code != GET_CODE (y))
913     return;
914 
915   if (GET_MODE (x) != GET_MODE (y))
916     return;
917 
918   if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
919     {
920       if (! MEM_ATTRS (x))
921 	MEM_ATTRS (y) = 0;
922       else if (! MEM_ATTRS (y))
923 	MEM_ATTRS (x) = 0;
924       else
925 	{
926 	  HOST_WIDE_INT mem_size;
927 
928 	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
929 	    {
930 	      set_mem_alias_set (x, 0);
931 	      set_mem_alias_set (y, 0);
932 	    }
933 
934 	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
935 	    {
936 	      set_mem_expr (x, 0);
937 	      set_mem_expr (y, 0);
938 	      clear_mem_offset (x);
939 	      clear_mem_offset (y);
940 	    }
941 	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
942 		   || (MEM_OFFSET_KNOWN_P (x)
943 		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
944 	    {
945 	      clear_mem_offset (x);
946 	      clear_mem_offset (y);
947 	    }
948 
949 	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
950 	    {
951 	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
952 	      set_mem_size (x, mem_size);
953 	      set_mem_size (y, mem_size);
954 	    }
955 	  else
956 	    {
957 	      clear_mem_size (x);
958 	      clear_mem_size (y);
959 	    }
960 
961 	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
962 	  set_mem_align (y, MEM_ALIGN (x));
963 	}
964     }
965   if (code == MEM)
966     {
967       if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
968 	{
969 	  MEM_READONLY_P (x) = 0;
970 	  MEM_READONLY_P (y) = 0;
971 	}
972       if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
973 	{
974 	  MEM_NOTRAP_P (x) = 0;
975 	  MEM_NOTRAP_P (y) = 0;
976 	}
977       if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
978 	{
979 	  MEM_VOLATILE_P (x) = 1;
980 	  MEM_VOLATILE_P (y) = 1;
981 	}
982     }
983 
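  /* Recurse into the operands so that nested MEMs also get their
     attributes merged.  */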
984   fmt = GET_RTX_FORMAT (code);
985   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
986     {
987       switch (fmt[i])
988 	{
989 	case 'E':
990 	  /* Two vectors must have the same length.  */
991 	  if (XVECLEN (x, i) != XVECLEN (y, i))
992 	    return;
993 
994 	  for (j = 0; j < XVECLEN (x, i); j++)
995 	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
996 
997 	  break;
998 
999 	case 'e':
1000 	  merge_memattrs (XEXP (x, i), XEXP (y, i));
1001 	}
1002     }
1003   return;
1004 }
1005 
1006 
1007  /* Check whether patterns P1 and P2 are equivalent, apart from the possibly
1008     different single sets S1 and S2.  */
1009 
1010 static bool
1011 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
1012 {
1013   int i;
1014   rtx e1, e2;
1015 
1016   if (p1 == s1 && p2 == s2)
1017     return true;
1018 
1019   if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
1020     return false;
1021 
1022   if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
1023     return false;
1024 
1025   for (i = 0; i < XVECLEN (p1, 0); i++)
1026     {
1027       e1 = XVECEXP (p1, 0, i);
1028       e2 = XVECEXP (p2, 0, i);
1029       if (e1 == s1 && e2 == s2)
1030         continue;
1031       if (reload_completed
1032           ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
1033         continue;
1034 
1035       return false;
1036     }
1037 
1038   return true;
1039 }
1040 
1041 /* Examine register notes on I1 and I2 and return:
1042    - dir_forward if I1 can be replaced by I2, or
1043    - dir_backward if I2 can be replaced by I1, or
1044    - dir_both if both are the case.  */
1045 
1046 static enum replace_direction
1047 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1048 {
1049   rtx s1, s2, d1, d2, src1, src2, note1, note2;
1050   bool c1, c2;
1051 
1052   /* Check for 2 sets.  */
1053   s1 = single_set (i1);
1054   s2 = single_set (i2);
1055   if (s1 == NULL_RTX || s2 == NULL_RTX)
1056     return dir_none;
1057 
1058   /* Check that the 2 sets set the same dest.  */
1059   d1 = SET_DEST (s1);
1060   d2 = SET_DEST (s2);
1061   if (!(reload_completed
1062         ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1063     return dir_none;
1064 
1065   /* Find an identical reg_equiv or reg_equal note, which implies that the 2 sets
1066      set dest to the same value.  */
1067   note1 = find_reg_equal_equiv_note (i1);
1068   note2 = find_reg_equal_equiv_note (i2);
1069   if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
1070       || !CONST_INT_P (XEXP (note1, 0)))
1071     return dir_none;
1072 
1073   if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1074     return dir_none;
1075 
1076   /* Although the 2 sets set dest to the same value, we cannot replace
1077        (set (dest) (const_int))
1078      by
1079        (set (dest) (reg))
1080      because we don't know if the reg is live and has the same value at the
1081      location of replacement.  */
1082   src1 = SET_SRC (s1);
1083   src2 = SET_SRC (s2);
1084   c1 = CONST_INT_P (src1);
1085   c2 = CONST_INT_P (src2);
1086   if (c1 && c2)
1087     return dir_both;
1088   else if (c2)
1089     return dir_forward;
1090   else if (c1)
1091     return dir_backward;
1092 
1093   return dir_none;
1094 }
1095 
1096 /* Merges directions A and B.  */
1097 
1098 static enum replace_direction
1099 merge_dir (enum replace_direction a, enum replace_direction b)
1100 {
1101   /* Implements the following table:
1102         |bo fw bw no
1103      ---+-----------
1104      bo |bo fw bw no
1105      fw |-- fw no no
1106      bw |-- -- bw no
1107      no |-- -- -- no.  */
1108 
1109   if (a == b)
1110     return a;
1111 
1112   if (a == dir_both)
1113     return b;
1114   if (b == dir_both)
1115     return a;
1116 
1117   return dir_none;
1118 }
1119 
1120 /* Examine I1 and I2 and return:
1121    - dir_forward if I1 can be replaced by I2, or
1122    - dir_backward if I2 can be replaced by I1, or
1123    - dir_both if both are the case.  */
1124 
1125 static enum replace_direction
1126 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1127 {
1128   rtx p1, p2;
1129 
1130   /* Verify that I1 and I2 are equivalent.  */
1131   if (GET_CODE (i1) != GET_CODE (i2))
1132     return dir_none;
1133 
1134   /* __builtin_unreachable() may lead to empty blocks (ending with
1135      NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
1136   if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1137     return dir_both;
1138 
1139   /* ??? Do not allow cross-jumping between different stack levels.  */
1140   p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1141   p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1142   if (p1 && p2)
1143     {
1144       p1 = XEXP (p1, 0);
1145       p2 = XEXP (p2, 0);
1146       if (!rtx_equal_p (p1, p2))
1147         return dir_none;
1148 
1149       /* ??? Worse, this adjustment had better be constant lest we
1150          have differing incoming stack levels.  */
1151       if (!frame_pointer_needed
1152           && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
1153 	return dir_none;
1154     }
1155   else if (p1 || p2)
1156     return dir_none;
1157 
1158   p1 = PATTERN (i1);
1159   p2 = PATTERN (i2);
1160 
1161   if (GET_CODE (p1) != GET_CODE (p2))
1162     return dir_none;
1163 
1164   /* If this is a CALL_INSN, compare register usage information.
1165      If we don't check this on stack register machines, the two
1166      CALL_INSNs might be merged leaving reg-stack.c with mismatching
1167      numbers of stack registers in the same basic block.
1168      If we don't check this on machines with delay slots, a delay slot may
1169      be filled that clobbers a parameter expected by the subroutine.
1170 
1171      ??? We take the simple route for now and assume that if they're
1172      equal, they were constructed identically.
1173 
1174      Also check for identical exception regions.  */
1175 
1176   if (CALL_P (i1))
1177     {
1178       /* Ensure the same EH region.  */
1179       rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1180       rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1181 
1182       if (!n1 && n2)
1183 	return dir_none;
1184 
1185       if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1186 	return dir_none;
1187 
1188       if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1189 			CALL_INSN_FUNCTION_USAGE (i2))
1190 	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1191 	return dir_none;
1192 
1193       /* For address sanitizer, never crossjump __asan_report_* builtins,
1194 	 otherwise errors might be reported on incorrect lines.  */
1195       if (flag_sanitize & SANITIZE_ADDRESS)
1196 	{
1197 	  rtx call = get_call_rtx_from (i1);
1198 	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1199 	    {
1200 	      rtx symbol = XEXP (XEXP (call, 0), 0);
1201 	      if (SYMBOL_REF_DECL (symbol)
1202 		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1203 		{
1204 		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1205 		       == BUILT_IN_NORMAL)
1206 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1207 			 >= BUILT_IN_ASAN_REPORT_LOAD1
1208 		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1209 			 <= BUILT_IN_ASAN_STOREN)
1210 		    return dir_none;
1211 		}
1212 	    }
1213 	}
1214     }
1215 
1216 #ifdef STACK_REGS
1217   /* If cross_jump_death_matters is not 0, the insn's mode
1218      indicates whether or not the insn contains any stack-like
1219      regs.  */
1220 
1221   if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1222     {
1223       /* If register stack conversion has already been done, then
1224 	 death notes must also be compared before it is certain that
1225 	 the two instruction streams match.  */
1226 
1227       rtx note;
1228       HARD_REG_SET i1_regset, i2_regset;
1229 
1230       CLEAR_HARD_REG_SET (i1_regset);
1231       CLEAR_HARD_REG_SET (i2_regset);
1232 
1233       for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1234 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1235 	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1236 
1237       for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1238 	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1239 	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1240 
1241       if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1242 	return dir_none;
1243     }
1244 #endif
1245 
1246   if (reload_completed
1247       ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1248     return dir_both;
1249 
1250   return can_replace_by (i1, i2);
1251 }
1252 
1253 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1254    flow_find_head_matching_sequence, ensure the notes match.  */
1255 
1256 static void
1257 merge_notes (rtx_insn *i1, rtx_insn *i2)
1258 {
1259   /* If the merged insns have different REG_EQUAL notes, then
1260      remove them.  */
1261   rtx equiv1 = find_reg_equal_equiv_note (i1);
1262   rtx equiv2 = find_reg_equal_equiv_note (i2);
1263 
1264   if (equiv1 && !equiv2)
1265     remove_note (i1, equiv1);
1266   else if (!equiv1 && equiv2)
1267     remove_note (i2, equiv2);
1268   else if (equiv1 && equiv2
1269 	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1270     {
1271       remove_note (i1, equiv1);
1272       remove_note (i2, equiv2);
1273     }
1274 }
1275 
1276  /* Walk from I1 in BB1 backwards until the next non-debug insn, and return the
1277     resulting insn in I1 and the corresponding bb in BB1.  At the head of a
1278     bb, if there is a predecessor bb that reaches this bb via fallthru, and
1279     FOLLOW_FALLTHRU is set, walk further into the predecessor bb and record
1280     this in DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1281 
1282 static void
1283 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1284                        bool *did_fallthru)
1285 {
1286   edge fallthru;
1287 
1288   *did_fallthru = false;
1289 
1290   /* Ignore notes.  */
1291   while (!NONDEBUG_INSN_P (*i1))
1292     {
1293       if (*i1 != BB_HEAD (*bb1))
1294         {
1295           *i1 = PREV_INSN (*i1);
1296           continue;
1297         }
1298 
1299       if (!follow_fallthru)
1300         return;
1301 
1302       fallthru = find_fallthru_edge ((*bb1)->preds);
1303       if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1304           || !single_succ_p (fallthru->src))
1305         return;
1306 
1307       *bb1 = fallthru->src;
1308       *i1 = BB_END (*bb1);
1309       *did_fallthru = true;
1310      }
1311 }
1312 
1313 /* Look through the insns at the end of BB1 and BB2 and find the longest
1314    sequence that are either equivalent, or allow forward or backward
1315    replacement.  Store the first insns for that sequence in *F1 and *F2 and
1316    return the sequence length.
1317 
1318    DIR_P indicates the allowed replacement direction on function entry, and
1319    the actual replacement direction on function exit.  If NULL, only equivalent
1320    sequences are allowed.
1321 
1322    To simplify callers of this function, if the blocks match exactly,
1323    store the head of the blocks in *F1 and *F2.  */
1324 
1325 int
1326 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1327 		      rtx_insn **f2, enum replace_direction *dir_p)
1328 {
1329   rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1330   int ninsns = 0;
1331   enum replace_direction dir, last_dir, afterlast_dir;
1332   bool follow_fallthru, did_fallthru;
1333 
1334   if (dir_p)
1335     dir = *dir_p;
1336   else
1337     dir = dir_both;
1338   afterlast_dir = dir;
1339   last_dir = afterlast_dir;
1340 
1341   /* Skip simple jumps at the end of the blocks.  Complex jumps still
1342      need to be compared for equivalence, which we'll do below.  */
1343 
1344   i1 = BB_END (bb1);
1345   last1 = afterlast1 = last2 = afterlast2 = NULL;
1346   if (onlyjump_p (i1)
1347       || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1348     {
1349       last1 = i1;
1350       i1 = PREV_INSN (i1);
1351     }
1352 
1353   i2 = BB_END (bb2);
1354   if (onlyjump_p (i2)
1355       || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1356     {
1357       last2 = i2;
1358       /* Count everything except for an unconditional jump as an insn.
1359 	 Don't count any jumps if dir_p is NULL.  */
1360       if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1361 	ninsns++;
1362       i2 = PREV_INSN (i2);
1363     }
1364 
1365   while (true)
1366     {
1367       /* In the following example, we can replace all jumps to C by jumps to A.
1368 
1369          This removes 4 duplicate insns.
1370          [bb A] insn1            [bb C] insn1
1371                 insn2                   insn2
1372          [bb B] insn3                   insn3
1373                 insn4                   insn4
1374                 jump_insn               jump_insn
1375 
1376          We could also replace all jumps to A by jumps to C, but that leaves B
1377          alive, and removes only 2 duplicate insns.  In a subsequent crossjump
1378          step, all jumps to B would be replaced with jumps to the middle of C,
1379          achieving the same result with more effort.
1380          So we allow only the first possibility, which means that we don't allow
1381          fallthru in the block that's being replaced.  */
1382 
1383       follow_fallthru = dir_p && dir != dir_forward;
1384       walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1385       if (did_fallthru)
1386         dir = dir_backward;
1387 
1388       follow_fallthru = dir_p && dir != dir_backward;
1389       walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1390       if (did_fallthru)
1391         dir = dir_forward;
1392 
1393       if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1394 	break;
1395 
1396       dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1397       if (dir == dir_none || (!dir_p && dir != dir_both))
1398 	break;
1399 
1400       merge_memattrs (i1, i2);
1401 
1402       /* Don't begin a cross-jump with a NOTE insn.  */
1403       if (INSN_P (i1))
1404 	{
1405 	  merge_notes (i1, i2);
1406 
1407 	  afterlast1 = last1, afterlast2 = last2;
1408 	  last1 = i1, last2 = i2;
1409 	  afterlast_dir = last_dir;
1410 	  last_dir = dir;
1411 	  if (active_insn_p (i1))
1412 	    ninsns++;
1413 	}
1414 
1415       i1 = PREV_INSN (i1);
1416       i2 = PREV_INSN (i2);
1417     }
1418 
1419 #ifdef HAVE_cc0
1420   /* Don't allow the insn after a compare to be shared by
1421      cross-jumping unless the compare is also shared.  */
1422   if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1423     last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1424 #endif
1425 
1426   /* Include preceding notes and labels in the cross-jump.  One,
1427      this may bring us to the head of the blocks as requested above.
1428      Two, it keeps line number notes as closely matched as possible.  */
1429   if (ninsns)
1430     {
1431       bb1 = BLOCK_FOR_INSN (last1);
1432       while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1433 	last1 = PREV_INSN (last1);
1434 
1435       if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1436 	last1 = PREV_INSN (last1);
1437 
1438       bb2 = BLOCK_FOR_INSN (last2);
1439       while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1440 	last2 = PREV_INSN (last2);
1441 
1442       if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1443 	last2 = PREV_INSN (last2);
1444 
1445       *f1 = last1;
1446       *f2 = last2;
1447     }
1448 
1449   if (dir_p)
1450     *dir_p = last_dir;
1451   return ninsns;
1452 }
1453 
1454 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1455    the head of the two blocks.  Do not include jumps at the end.
1456    If STOP_AFTER is nonzero, stop after finding that many matching
1457    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1458    nonzero, only count active insns.  */
1459 
1460 int
1461 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1462 				  rtx_insn **f2, int stop_after)
1463 {
1464   rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1465   int ninsns = 0;
1466   edge e;
1467   edge_iterator ei;
1468   int nehedges1 = 0, nehedges2 = 0;
1469 
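  /* Count the EH edges of each block; they are compared below to make sure
     we do not merge insns with different effects on EH.  */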
1470   FOR_EACH_EDGE (e, ei, bb1->succs)
1471     if (e->flags & EDGE_EH)
1472       nehedges1++;
1473   FOR_EACH_EDGE (e, ei, bb2->succs)
1474     if (e->flags & EDGE_EH)
1475       nehedges2++;
1476 
1477   i1 = BB_HEAD (bb1);
1478   i2 = BB_HEAD (bb2);
1479   last1 = beforelast1 = last2 = beforelast2 = NULL;
1480 
1481   while (true)
1482     {
1483       /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
1484       while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1485 	{
1486 	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1487 	    break;
1488 	  i1 = NEXT_INSN (i1);
1489 	}
1490 
1491       while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1492 	{
1493 	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1494 	    break;
1495 	  i2 = NEXT_INSN (i2);
1496 	}
1497 
1498       if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1499 	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1500 	break;
1501 
1502       if (NOTE_P (i1) || NOTE_P (i2)
1503 	  || JUMP_P (i1) || JUMP_P (i2))
1504 	break;
1505 
1506       /* A sanity check to make sure we're not merging insns with different
1507 	 effects on EH.  If only one of them ends a basic block, it shouldn't
1508 	 have an EH edge; if both end a basic block, there should be the same
1509 	 number of EH edges.  */
1510       if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1511 	   && nehedges1 > 0)
1512 	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1513 	      && nehedges2 > 0)
1514 	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1515 	      && nehedges1 != nehedges2))
1516 	break;
1517 
1518       if (old_insns_match_p (0, i1, i2) != dir_both)
1519 	break;
1520 
1521       merge_memattrs (i1, i2);
1522 
1523       /* Don't begin a cross-jump with a NOTE insn.  */
1524       if (INSN_P (i1))
1525 	{
1526 	  merge_notes (i1, i2);
1527 
1528 	  beforelast1 = last1, beforelast2 = last2;
1529 	  last1 = i1, last2 = i2;
1530 	  if (!stop_after || active_insn_p (i1))
1531 	    ninsns++;
1532 	}
1533 
1534       if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1535 	  || (stop_after > 0 && ninsns == stop_after))
1536 	break;
1537 
1538       i1 = NEXT_INSN (i1);
1539       i2 = NEXT_INSN (i2);
1540     }
1541 
1542 #ifdef HAVE_cc0
1543   /* Don't allow a compare to be shared by cross-jumping unless the insn
1544      after the compare is also shared.  */
1545   if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
1546     last1 = beforelast1, last2 = beforelast2, ninsns--;
1547 #endif
1548 
1549   if (ninsns)
1550     {
1551       *f1 = last1;
1552       *f2 = last2;
1553     }
1554 
1555   return ninsns;
1556 }
1557 
1558 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1559    the branch instruction.  This means that if we commonize the control
1560    flow before the end of the basic block, the semantics remain unchanged.
1561 
1562    We may assume that there exists one edge with a common destination.  */
1563 
1564 static bool
1565 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1566 {
1567   int nehedges1 = 0, nehedges2 = 0;
1568   edge fallthru1 = 0, fallthru2 = 0;
1569   edge e1, e2;
1570   edge_iterator ei;
1571 
1572   /* If we performed shrink-wrapping, edges to the exit block can
1573      only be distinguished for JUMP_INSNs.  The two paths may differ in
1574      whether they went through the prologue.  Sibcalls are fine, we know
1575      that we either didn't need or inserted an epilogue before them.  */
1576   if (crtl->shrink_wrapped
1577       && single_succ_p (bb1)
1578       && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1579       && !JUMP_P (BB_END (bb1))
1580       && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1581     return false;
1582 
1583   /* If BB1 has only one successor, we may be looking at either an
1584      unconditional jump, or a fake edge to exit.  */
1585   if (single_succ_p (bb1)
1586       && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1587       && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1588     return (single_succ_p (bb2)
1589 	    && (single_succ_edge (bb2)->flags
1590 		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1591 	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1592 
1593   /* Match conditional jumps - this may get tricky when fallthru and branch
1594      edges are crossed.  */
1595   if (EDGE_COUNT (bb1->succs) == 2
1596       && any_condjump_p (BB_END (bb1))
1597       && onlyjump_p (BB_END (bb1)))
1598     {
1599       edge b1, f1, b2, f2;
1600       bool reverse, match;
1601       rtx set1, set2, cond1, cond2;
1602       enum rtx_code code1, code2;
1603 
1604       if (EDGE_COUNT (bb2->succs) != 2
1605 	  || !any_condjump_p (BB_END (bb2))
1606 	  || !onlyjump_p (BB_END (bb2)))
1607 	return false;
1608 
1609       b1 = BRANCH_EDGE (bb1);
1610       b2 = BRANCH_EDGE (bb2);
1611       f1 = FALLTHRU_EDGE (bb1);
1612       f2 = FALLTHRU_EDGE (bb2);
1613 
1614       /* Get around possible forwarders on fallthru edges.  Other cases
1615 	 should be optimized out already.  */
1616       if (FORWARDER_BLOCK_P (f1->dest))
1617 	f1 = single_succ_edge (f1->dest);
1618 
1619       if (FORWARDER_BLOCK_P (f2->dest))
1620 	f2 = single_succ_edge (f2->dest);
1621 
1622       /* To simplify use of this function, return false if there are
1623 	 unneeded forwarder blocks.  These will get eliminated later
1624 	 during cleanup_cfg.  */
1625       if (FORWARDER_BLOCK_P (f1->dest)
1626 	  || FORWARDER_BLOCK_P (f2->dest)
1627 	  || FORWARDER_BLOCK_P (b1->dest)
1628 	  || FORWARDER_BLOCK_P (b2->dest))
1629 	return false;
1630 
1631       if (f1->dest == f2->dest && b1->dest == b2->dest)
1632 	reverse = false;
1633       else if (f1->dest == b2->dest && b1->dest == f2->dest)
1634 	reverse = true;
1635       else
1636 	return false;
1637 
1638       set1 = pc_set (BB_END (bb1));
1639       set2 = pc_set (BB_END (bb2));
1640       if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1641 	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1642 	reverse = !reverse;
1643 
1644       cond1 = XEXP (SET_SRC (set1), 0);
1645       cond2 = XEXP (SET_SRC (set2), 0);
1646       code1 = GET_CODE (cond1);
1647       if (reverse)
1648 	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1649       else
1650 	code2 = GET_CODE (cond2);
1651 
1652       if (code2 == UNKNOWN)
1653 	return false;
1654 
1655       /* Verify codes and operands match.  */
1656       match = ((code1 == code2
1657 		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1658 		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1659 	       || (code1 == swap_condition (code2)
1660 		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1661 					      XEXP (cond2, 0))
1662 		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1663 					      XEXP (cond2, 1))));
1664 
1665       /* If we return true, we will join the blocks.  Which means that
1666 	 we will only have one branch prediction bit to work with.  Thus
1667 	 we require the existing branches to have probabilities that are
1668 	 roughly similar.  */
1669       if (match
1670 	  && optimize_bb_for_speed_p (bb1)
1671 	  && optimize_bb_for_speed_p (bb2))
1672 	{
1673 	  int prob2;
1674 
1675 	  if (b1->dest == b2->dest)
1676 	    prob2 = b2->probability;
1677 	  else
1678 	    /* Do not use f2 probability as f2 may be forwarded.  */
1679 	    prob2 = REG_BR_PROB_BASE - b2->probability;
1680 
1681 	  /* Fail if the difference in probabilities is greater than 50%.
1682 	     This rules out two well-predicted branches with opposite
1683 	     outcomes.  */
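	  /* For instance, with REG_BR_PROB_BASE of 10000, a branch taken with
	     probability 9000 against one taken with probability 2000 differs
	     by 7000 > 5000 and is rejected.  */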
1684 	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1685 	    {
1686 	      if (dump_file)
1687 		fprintf (dump_file,
1688 			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1689 			 bb1->index, bb2->index, b1->probability, prob2);
1690 
1691 	      return false;
1692 	    }
1693 	}
1694 
1695       if (dump_file && match)
1696 	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1697 		 bb1->index, bb2->index);
1698 
1699       return match;
1700     }
1701 
1702   /* Generic case - we are seeing a computed jump, table jump or trapping
1703      instruction.  */
1704 
1705   /* Check whether there are tablejumps at the end of BB1 and BB2.
1706      Return true if they are identical.  */
1707     {
1708       rtx label1, label2;
1709       rtx_jump_table_data *table1, *table2;
1710 
1711       if (tablejump_p (BB_END (bb1), &label1, &table1)
1712 	  && tablejump_p (BB_END (bb2), &label2, &table2)
1713 	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1714 	{
1715 	  /* The labels should never be the same rtx.  If they really are the
1716 	     same, the jump tables are the same too.  So disable crossjumping of
1717 	     blocks BB1 and BB2 because when deleting the common insns at the end
1718 	     of BB1 by delete_basic_block () the jump table would be deleted too.  */
1719 	  /* If LABEL2 is referenced in BB1->END do not do anything
1720 	     because we would lose information when replacing
1721 	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1722 	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1723 	    {
1724 	      /* Set IDENTICAL to true when the tables are identical.  */
1725 	      bool identical = false;
1726 	      rtx p1, p2;
1727 
1728 	      p1 = PATTERN (table1);
1729 	      p2 = PATTERN (table2);
1730 	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1731 		{
1732 		  identical = true;
1733 		}
1734 	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1735 		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1736 		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1737 		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1738 		{
1739 		  int i;
1740 
1741 		  identical = true;
1742 		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1743 		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1744 		      identical = false;
1745 		}
1746 
1747 	      if (identical)
1748 		{
1749 		  bool match;
1750 
1751 		  /* Temporarily replace references to LABEL1 with LABEL2
1752 		     in BB1->END so that we could compare the instructions.  */
1753 		  replace_label_in_insn (BB_END (bb1), label1, label2, false);
1754 
1755 		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1756 			   == dir_both);
1757 		  if (dump_file && match)
1758 		    fprintf (dump_file,
1759 			     "Tablejumps in bb %i and %i match.\n",
1760 			     bb1->index, bb2->index);
1761 
1762 		  /* Set the original label in BB1->END because when deleting
1763 		     a block whose end is a tablejump, the tablejump referenced
1764 		     from the instruction is deleted too.  */
1765 		  replace_label_in_insn (BB_END (bb1), label2, label1, false);
1766 
1767 		  return match;
1768 		}
1769 	    }
1770 	  return false;
1771 	}
1772     }
1773 
1774   /* Find the last non-debug non-note instruction in each bb, but
1775      stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1776      handles that case specially.  old_insns_match_p does not handle
1777      other types of instruction notes.  */
1778   rtx_insn *last1 = BB_END (bb1);
1779   rtx_insn *last2 = BB_END (bb2);
1780   while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1781 	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1782     last1 = PREV_INSN (last1);
1783   while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1784 	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1785     last2 = PREV_INSN (last2);
1786   gcc_assert (last1 && last2);
1787 
1788   /* First ensure that the instructions match.  There may be many outgoing
1789      edges so this test is generally cheaper.  */
1790   if (old_insns_match_p (mode, last1, last2) != dir_both)
1791     return false;
1792 
1793   /* Search the outgoing edges, ensure that the counts do match, find possible
1794      fallthru and exception handling edges since these need more
1795      validation.  */
1796   if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1797     return false;
1798 
1799   bool nonfakeedges = false;
1800   FOR_EACH_EDGE (e1, ei, bb1->succs)
1801     {
1802       e2 = EDGE_SUCC (bb2, ei.index);
1803 
1804       if ((e1->flags & EDGE_FAKE) == 0)
1805 	nonfakeedges = true;
1806 
1807       if (e1->flags & EDGE_EH)
1808 	nehedges1++;
1809 
1810       if (e2->flags & EDGE_EH)
1811 	nehedges2++;
1812 
1813       if (e1->flags & EDGE_FALLTHRU)
1814 	fallthru1 = e1;
1815       if (e2->flags & EDGE_FALLTHRU)
1816 	fallthru2 = e2;
1817     }
1818 
1819   /* If the number of edges of various types does not match, fail.  */
1820   if (nehedges1 != nehedges2
1821       || (fallthru1 != 0) != (fallthru2 != 0))
1822     return false;
1823 
1824   /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1825      and the last real insn doesn't have REG_ARGS_SIZE note, don't
1826      attempt to optimize, as the two basic blocks might have different
1827      REG_ARGS_SIZE depths.  For noreturn calls and unconditional
1828      traps there should be REG_ARGS_SIZE notes, though they could be missing
1829      for __builtin_unreachable () uses.  */
1830   if (!nonfakeedges
1831       && !ACCUMULATE_OUTGOING_ARGS
1832       && (!INSN_P (last1)
1833           || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1834     return false;
1835 
1836   /* Fallthru edges must be forwarded to the same destination.  */
1837   if (fallthru1)
1838     {
1839       basic_block d1 = (forwarder_block_p (fallthru1->dest)
1840 			? single_succ (fallthru1->dest): fallthru1->dest);
1841       basic_block d2 = (forwarder_block_p (fallthru2->dest)
1842 			? single_succ (fallthru2->dest): fallthru2->dest);
1843 
1844       if (d1 != d2)
1845 	return false;
1846     }
1847 
1848   /* Ensure the same EH region.  */
1849   {
1850     rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1851     rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1852 
1853     if (!n1 && n2)
1854       return false;
1855 
1856     if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1857       return false;
1858   }
1859 
1860   /* The same checks as in try_crossjump_to_edge. It is required for RTL
1861      version of sequence abstraction.  */
1862   FOR_EACH_EDGE (e1, ei, bb2->succs)
1863     {
1864       edge e2;
1865       edge_iterator ei;
1866       basic_block d1 = e1->dest;
1867 
1868       if (FORWARDER_BLOCK_P (d1))
1869         d1 = EDGE_SUCC (d1, 0)->dest;
1870 
1871       FOR_EACH_EDGE (e2, ei, bb1->succs)
1872         {
1873           basic_block d2 = e2->dest;
1874           if (FORWARDER_BLOCK_P (d2))
1875             d2 = EDGE_SUCC (d2, 0)->dest;
1876           if (d1 == d2)
1877             break;
1878         }
1879 
1880       if (!e2)
1881         return false;
1882     }
1883 
1884   return true;
1885 }
1886 
1887 /* Returns true if BB basic block has a preserve label.  */
1888 
1889 static bool
1890 block_has_preserve_label (basic_block bb)
1891 {
1892   return (bb
1893           && block_label (bb)
1894           && LABEL_PRESERVE_P (block_label (bb)));
1895 }
1896 
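/* Crossjumping, schematically: when two predecessors of the same block end
   in an identical insn sequence, the copy of that tail in one predecessor is
   deleted and control is redirected into the matching tail of the other,
   splitting that block first if the tail does not already start it:

	A: ...; X; Y; jump L		A: ...; jump to the tail in B
	B: ...; X; Y; jump L   ==>	B: ...; X; Y; jump L
	L:				L:
*/
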
1897 /* E1 and E2 are edges with the same destination block.  Search their
1898    predecessors for common code.  If found, redirect control flow from
1899    (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1900    or the other way around (dir_backward).  DIR specifies the allowed
1901    replacement direction.  */
1902 
1903 static bool
1904 try_crossjump_to_edge (int mode, edge e1, edge e2,
1905                        enum replace_direction dir)
1906 {
1907   int nmatch;
1908   basic_block src1 = e1->src, src2 = e2->src;
1909   basic_block redirect_to, redirect_from, to_remove;
1910   basic_block osrc1, osrc2, redirect_edges_to, tmp;
1911   rtx_insn *newpos1, *newpos2;
1912   edge s;
1913   edge_iterator ei;
1914 
1915   newpos1 = newpos2 = NULL;
1916 
1917   /* If we have partitioned hot/cold basic blocks, it is a bad idea
1918      to try this optimization.
1919 
1920      Basic block partitioning may result in some jumps that appear to
1921      be optimizable (or blocks that appear to be mergeable), but which really
1922      must be left untouched (they are required to make it safely across
1923      partition boundaries).  See the comments at the top of
1924      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1925 
1926   if (crtl->has_bb_partition && reload_completed)
1927     return false;
1928 
1929   /* Search backward through forwarder blocks.  We don't need to worry
1930      about multiple entry or chained forwarders, as they will be optimized
1931      away.  We do this to look past the unconditional jump following a
1932      conditional jump that is required due to the current CFG shape.  */
1933   if (single_pred_p (src1)
1934       && FORWARDER_BLOCK_P (src1))
1935     e1 = single_pred_edge (src1), src1 = e1->src;
1936 
1937   if (single_pred_p (src2)
1938       && FORWARDER_BLOCK_P (src2))
1939     e2 = single_pred_edge (src2), src2 = e2->src;
1940 
1941   /* Nothing to do if we reach ENTRY, or a common source block.  */
1942   if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1943       || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1944     return false;
1945   if (src1 == src2)
1946     return false;
1947 
1948   /* Seeing more than one forwarder block would confuse us later...  */
1949   if (FORWARDER_BLOCK_P (e1->dest)
1950       && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1951     return false;
1952 
1953   if (FORWARDER_BLOCK_P (e2->dest)
1954       && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1955     return false;
1956 
1957   /* Likewise with dead code (possibly newly created by the other optimizations
1958      of cleanup_cfg).  */
1959   if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1960     return false;
1961 
1962   /* Look for the common insn sequence, part the first ...  */
1963   if (!outgoing_edges_match (mode, src1, src2))
1964     return false;
1965 
1966   /* ... and part the second.  */
1967   nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1968 
1969   osrc1 = src1;
1970   osrc2 = src2;
1971   if (newpos1 != NULL_RTX)
1972     src1 = BLOCK_FOR_INSN (newpos1);
1973   if (newpos2 != NULL_RTX)
1974     src2 = BLOCK_FOR_INSN (newpos2);
1975 
1976   if (dir == dir_backward)
1977     {
1978 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1979       SWAP (basic_block, osrc1, osrc2);
1980       SWAP (basic_block, src1, src2);
1981       SWAP (edge, e1, e2);
1982       SWAP (rtx_insn *, newpos1, newpos2);
1983 #undef SWAP
1984     }
1985 
1986   /* Don't proceed with the crossjump unless we found a sufficient number
1987      of matching instructions or the 'from' block was totally matched
1988      (such that its predecessors will hopefully be redirected and the
1989      block removed).  */
1990   if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1991       && (newpos1 != BB_HEAD (src1)))
1992     return false;
1993 
1994   /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
1995   if (block_has_preserve_label (e1->dest)
1996       && (e1->flags & EDGE_ABNORMAL))
1997     return false;
1998 
1999   /* Here we know that the insns at the end of SRC1 which are common with
2000      SRC2 will be deleted.
2001      If we have tablejumps at the end of SRC1 and SRC2, they have already
2002      been compared for equivalence in outgoing_edges_match (), so replace
2003      the references to TABLE1 by references to TABLE2.  */
2004     {
2005       rtx label1, label2;
2006       rtx_jump_table_data *table1, *table2;
2007 
2008       if (tablejump_p (BB_END (osrc1), &label1, &table1)
2009 	  && tablejump_p (BB_END (osrc2), &label2, &table2)
2010 	  && label1 != label2)
2011 	{
2012 	  rtx_insn *insn;
2013 
2014 	  /* Replace references to LABEL1 with LABEL2.  */
2015 	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2016 	    {
2017 	      /* Do not replace the label in SRC1->END because when deleting
2018 		 a block whose end is a tablejump, the tablejump referenced
2019 		 from the instruction is deleted too.  */
2020 	      if (insn != BB_END (osrc1))
2021 		replace_label_in_insn (insn, label1, label2, true);
2022 	    }
2023 	}
2024     }
2025 
2026   /* Avoid splitting if possible.  We must always split when SRC2 has
2027      EH predecessor edges, or we may end up with basic blocks with both
2028      normal and EH predecessor edges.  */
2029   if (newpos2 == BB_HEAD (src2)
2030       && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2031     redirect_to = src2;
2032   else
2033     {
2034       if (newpos2 == BB_HEAD (src2))
2035 	{
2036 	  /* Skip possible basic block header.  */
2037 	  if (LABEL_P (newpos2))
2038 	    newpos2 = NEXT_INSN (newpos2);
2039 	  while (DEBUG_INSN_P (newpos2))
2040 	    newpos2 = NEXT_INSN (newpos2);
2041 	  if (NOTE_P (newpos2))
2042 	    newpos2 = NEXT_INSN (newpos2);
2043 	  while (DEBUG_INSN_P (newpos2))
2044 	    newpos2 = NEXT_INSN (newpos2);
2045 	}
2046 
2047       if (dump_file)
2048 	fprintf (dump_file, "Splitting bb %i before %i insns\n",
2049 		 src2->index, nmatch);
2050       redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2051     }
2052 
2053   if (dump_file)
2054     fprintf (dump_file,
2055 	     "Cross jumping from bb %i to bb %i; %i common insns\n",
2056 	     src1->index, src2->index, nmatch);
2057 
2058   /* We may have some registers visible through the block.  */
2059   df_set_bb_dirty (redirect_to);
2060 
2061   if (osrc2 == src2)
2062     redirect_edges_to = redirect_to;
2063   else
2064     redirect_edges_to = osrc2;
2065 
2066   /* Recompute the frequencies and counts of outgoing edges.  */
2067   FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2068     {
2069       edge s2;
2070       edge_iterator ei;
2071       basic_block d = s->dest;
2072 
2073       if (FORWARDER_BLOCK_P (d))
2074 	d = single_succ (d);
2075 
2076       FOR_EACH_EDGE (s2, ei, src1->succs)
2077 	{
2078 	  basic_block d2 = s2->dest;
2079 	  if (FORWARDER_BLOCK_P (d2))
2080 	    d2 = single_succ (d2);
2081 	  if (d == d2)
2082 	    break;
2083 	}
2084 
2085       s->count += s2->count;
2086 
2087       /* Take care to update possible forwarder blocks.  We verified
2088 	 that there is no more than one in the chain, so we can't run
2089 	 into an infinite loop.  */
2090       if (FORWARDER_BLOCK_P (s->dest))
2091 	{
2092 	  single_succ_edge (s->dest)->count += s2->count;
2093 	  s->dest->count += s2->count;
2094 	  s->dest->frequency += EDGE_FREQUENCY (s);
2095 	}
2096 
2097       if (FORWARDER_BLOCK_P (s2->dest))
2098 	{
2099 	  single_succ_edge (s2->dest)->count -= s2->count;
2100 	  if (single_succ_edge (s2->dest)->count < 0)
2101 	    single_succ_edge (s2->dest)->count = 0;
2102 	  s2->dest->count -= s2->count;
2103 	  s2->dest->frequency -= EDGE_FREQUENCY (s);
2104 	  if (s2->dest->frequency < 0)
2105 	    s2->dest->frequency = 0;
2106 	  if (s2->dest->count < 0)
2107 	    s2->dest->count = 0;
2108 	}
2109 
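      /* The merged block will be entered with the combined frequency of
	 REDIRECT_EDGES_TO and SRC1, so use the frequency-weighted average of
	 the two outgoing probabilities (falling back to a plain average when
	 both frequencies are zero).  */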
2110       if (!redirect_edges_to->frequency && !src1->frequency)
2111 	s->probability = (s->probability + s2->probability) / 2;
2112       else
2113 	s->probability
2114 	  = ((s->probability * redirect_edges_to->frequency +
2115 	      s2->probability * src1->frequency)
2116 	     / (redirect_edges_to->frequency + src1->frequency));
2117     }
2118 
2119   /* Adjust count and frequency for the block.  An earlier jump
2120      threading pass may have left the profile in an inconsistent
2121      state (see update_bb_profile_for_threading) so we must be
2122      prepared for overflows.  */
2123   tmp = redirect_to;
2124   do
2125     {
2126       tmp->count += src1->count;
2127       tmp->frequency += src1->frequency;
2128       if (tmp->frequency > BB_FREQ_MAX)
2129         tmp->frequency = BB_FREQ_MAX;
2130       if (tmp == redirect_edges_to)
2131         break;
2132       tmp = find_fallthru_edge (tmp->succs)->dest;
2133     }
2134   while (true);
2135   update_br_prob_note (redirect_edges_to);
2136 
2137   /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
2138 
2139   /* Skip possible basic block header.  */
2140   if (LABEL_P (newpos1))
2141     newpos1 = NEXT_INSN (newpos1);
2142 
2143   while (DEBUG_INSN_P (newpos1))
2144     newpos1 = NEXT_INSN (newpos1);
2145 
2146   if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2147     newpos1 = NEXT_INSN (newpos1);
2148 
2149   while (DEBUG_INSN_P (newpos1))
2150     newpos1 = NEXT_INSN (newpos1);
2151 
2152   redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2153   to_remove = single_succ (redirect_from);
2154 
2155   redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2156   delete_basic_block (to_remove);
2157 
2158   update_forwarder_flag (redirect_from);
2159   if (redirect_to != src2)
2160     update_forwarder_flag (src2);
2161 
2162   return true;
2163 }
2164 
2165 /* Search the predecessors of BB for common insn sequences.  When found,
2166    share code between them by redirecting control flow.  Return true if
2167    any changes were made.  */
2168 
2169 static bool
2170 try_crossjump_bb (int mode, basic_block bb)
2171 {
2172   edge e, e2, fallthru;
2173   bool changed;
2174   unsigned max, ix, ix2;
2175 
2176   /* Nothing to do if there are not at least two incoming edges.  */
2177   if (EDGE_COUNT (bb->preds) < 2)
2178     return false;
2179 
2180   /* Don't crossjump if this block ends in a computed jump,
2181      unless we are optimizing for size.  */
2182   if (optimize_bb_for_size_p (bb)
2183       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2184       && computed_jump_p (BB_END (bb)))
2185     return false;
2186 
2187   /* If we are partitioning hot/cold basic blocks, we don't want to
2188      mess up unconditional or indirect jumps that cross between hot
2189      and cold sections.
2190 
2191      Basic block partitioning may result in some jumps that appear to
2192      be optimizable (or blocks that appear to be mergeable), but which really
2193      must be left untouched (they are required to make it safely across
2194      partition boundaries).  See the comments at the top of
2195      bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
2196 
2197   if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2198       != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2199       || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2200     return false;
2201 
2202   /* It is always cheapest to redirect a block that ends in a branch to
2203      a block that falls through into BB, as that adds no branches to the
2204      program.  We'll try that combination first.  */
2205   fallthru = NULL;
2206   max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2207 
2208   if (EDGE_COUNT (bb->preds) > max)
2209     return false;
2210 
2211   fallthru = find_fallthru_edge (bb->preds);
2212 
2213   changed = false;
2214   for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2215     {
2216       e = EDGE_PRED (bb, ix);
2217       ix++;
2218 
2219       /* As noted above, first try with the fallthru predecessor (or, if
2220 	 we are in cfglayout mode, a fallthru predecessor).  */
2221       if (fallthru)
2222 	{
2223 	  /* Don't combine the fallthru edge into anything else.
2224 	     If there is a match, we'll do it the other way around.  */
2225 	  if (e == fallthru)
2226 	    continue;
2227 	  /* If nothing changed since the last attempt, there is nothing
2228 	     we can do.  */
2229 	  if (!first_pass
2230 	      && !((e->src->flags & BB_MODIFIED)
2231 		   || (fallthru->src->flags & BB_MODIFIED)))
2232 	    continue;
2233 
2234 	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2235 	    {
2236 	      changed = true;
2237 	      ix = 0;
2238 	      continue;
2239 	    }
2240 	}
2241 
2242       /* Non-obvious work limiting check: Recognize that we're going
2243 	 to call try_crossjump_bb on every basic block.  So if we have
2244 	 two blocks with lots of outgoing edges (a switch) and they
2245 	 share lots of common destinations, then we would do the
2246 	 cross-jump check once for each common destination.
2247 
2248 	 Now, if the blocks actually are cross-jump candidates, then
2249 	 all of their destinations will be shared.  Which means that
2250 	 we only need check them for cross-jump candidacy once.  We
2251 	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2252 	 choosing to do the check only when the edge in question is the
2253 	 first successor of its source block.  */
2254       if (EDGE_SUCC (e->src, 0) != e)
2255 	continue;
2256 
2257       for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2258 	{
2259 	  e2 = EDGE_PRED (bb, ix2);
2260 
2261 	  if (e2 == e)
2262 	    continue;
2263 
2264 	  /* We've already checked the fallthru edge above.  */
2265 	  if (e2 == fallthru)
2266 	    continue;
2267 
2268 	  /* The "first successor" check above only prevents multiple
2269 	     checks of crossjump(A,B).  In order to prevent redundant
2270 	     checks of crossjump(B,A), require that A be the block
2271 	     with the lowest index.  */
2272 	  if (e->src->index > e2->src->index)
2273 	    continue;
2274 
2275 	  /* If nothing changed since the last attempt, there is nothing
2276 	     we can do.  */
2277 	  if (!first_pass
2278 	      && !((e->src->flags & BB_MODIFIED)
2279 		   || (e2->src->flags & BB_MODIFIED)))
2280 	    continue;
2281 
2282 	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
2283 	     direction.  */
2284 	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
2285 	    {
2286 	      changed = true;
2287 	      ix = 0;
2288 	      break;
2289 	    }
2290 	}
2291     }
2292 
2293   if (changed)
2294     crossjumps_occured = true;
2295 
2296   return changed;
2297 }
2298 
2299 /* Search the successors of BB for common insn sequences.  When found,
2300    share code between them by moving it across the basic block
2301    boundary.  Return true if any changes were made.  */
2302 
2303 static bool
2304 try_head_merge_bb (basic_block bb)
2305 {
2306   basic_block final_dest_bb = NULL;
2307   int max_match = INT_MAX;
2308   edge e0;
2309   rtx_insn **headptr, **currptr, **nextptr;
2310   bool changed, moveall;
2311   unsigned ix;
2312   rtx_insn *e0_last_head;
2313   rtx cond;
2314   rtx_insn *move_before;
2315   unsigned nedges = EDGE_COUNT (bb->succs);
2316   rtx_insn *jump = BB_END (bb);
2317   regset live, live_union;
2318 
2319   /* Nothing to do if there are not at least two outgoing edges.  */
2320   if (nedges < 2)
2321     return false;
2322 
2323   /* Don't crossjump if this block ends in a computed jump,
2324      unless we are optimizing for size.  */
2325   if (optimize_bb_for_size_p (bb)
2326       && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2327       && computed_jump_p (BB_END (bb)))
2328     return false;
2329 
2330   cond = get_condition (jump, &move_before, true, false);
2331   if (cond == NULL_RTX)
2332     {
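      /* On cc0 targets the insn that sets cc0 must immediately precede the
	 jump, so place any code we move before that setter rather than
	 directly before the jump.  */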
2333 #ifdef HAVE_cc0
2334       if (reg_mentioned_p (cc0_rtx, jump))
2335 	move_before = prev_nonnote_nondebug_insn (jump);
2336       else
2337 #endif
2338 	move_before = jump;
2339     }
2340 
2341   for (ix = 0; ix < nedges; ix++)
2342     if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2343       return false;
2344 
2345   for (ix = 0; ix < nedges; ix++)
2346     {
2347       edge e = EDGE_SUCC (bb, ix);
2348       basic_block other_bb = e->dest;
2349 
2350       if (df_get_bb_dirty (other_bb))
2351 	{
2352 	  block_was_dirty = true;
2353 	  return false;
2354 	}
2355 
2356       if (e->flags & EDGE_ABNORMAL)
2357 	return false;
2358 
2359       /* Normally, all destination blocks must only be reachable from this
2360 	 block, i.e. they must have one incoming edge.
2361 
2362 	 There is one special case we can handle, that of multiple consecutive
2363 	 jumps where the first jumps to one of the targets of the second jump.
2364 	 This happens frequently in switch statements for default labels.
2365 	 The structure is as follows:
2366 	 FINAL_DEST_BB
2367 	 ....
2368 	 if (cond) jump A;
2369 	 fall through
2370 	 BB
2371 	 jump with targets A, B, C, D...
2372 	 A
2373 	 has two incoming edges, from FINAL_DEST_BB and BB
2374 
2375 	 In this case, we can try to move the insns through BB and into
2376 	 FINAL_DEST_BB.  */
2377       if (EDGE_COUNT (other_bb->preds) != 1)
2378 	{
2379 	  edge incoming_edge, incoming_bb_other_edge;
2380 	  edge_iterator ei;
2381 
2382 	  if (final_dest_bb != NULL
2383 	      || EDGE_COUNT (other_bb->preds) != 2)
2384 	    return false;
2385 
2386 	  /* We must be able to move the insns across the whole block.  */
2387 	  move_before = BB_HEAD (bb);
2388 	  while (!NONDEBUG_INSN_P (move_before))
2389 	    move_before = NEXT_INSN (move_before);
2390 
2391 	  if (EDGE_COUNT (bb->preds) != 1)
2392 	    return false;
2393 	  incoming_edge = EDGE_PRED (bb, 0);
2394 	  final_dest_bb = incoming_edge->src;
2395 	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
2396 	    return false;
2397 	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2398 	    if (incoming_bb_other_edge != incoming_edge)
2399 	      break;
2400 	  if (incoming_bb_other_edge->dest != other_bb)
2401 	    return false;
2402 	}
2403     }
2404 
2405   e0 = EDGE_SUCC (bb, 0);
2406   e0_last_head = NULL;
2407   changed = false;
2408 
2409   for (ix = 1; ix < nedges; ix++)
2410     {
2411       edge e = EDGE_SUCC (bb, ix);
2412       rtx_insn *e0_last, *e_last;
2413       int nmatch;
2414 
2415       nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2416 						 &e0_last, &e_last, 0);
2417       if (nmatch == 0)
2418 	return false;
2419 
2420       if (nmatch < max_match)
2421 	{
2422 	  max_match = nmatch;
2423 	  e0_last_head = e0_last;
2424 	}
2425     }
2426 
2427   /* If we matched an entire block, we probably have to avoid moving the
2428      last insn.  */
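  /* A control flow insn, or one carrying a REG_EH_REGION note, has to remain
     the last insn of its block, so it cannot be moved up across BB's jump.  */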
2429   if (max_match > 0
2430       && e0_last_head == BB_END (e0->dest)
2431       && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2432 	  || control_flow_insn_p (e0_last_head)))
2433     {
2434       max_match--;
2435       if (max_match == 0)
2436 	return false;
2437       do
2438 	e0_last_head = prev_real_insn (e0_last_head);
2439       while (DEBUG_INSN_P (e0_last_head));
2440     }
2441 
2442   if (max_match == 0)
2443     return false;
2444 
2445   /* We must find a union of the live registers at each of the end points.  */
2446   live = BITMAP_ALLOC (NULL);
2447   live_union = BITMAP_ALLOC (NULL);
2448 
2449   currptr = XNEWVEC (rtx_insn *, nedges);
2450   headptr = XNEWVEC (rtx_insn *, nedges);
2451   nextptr = XNEWVEC (rtx_insn *, nedges);
2452 
2453   for (ix = 0; ix < nedges; ix++)
2454     {
2455       int j;
2456       basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2457       rtx_insn *head = BB_HEAD (merge_bb);
2458 
2459       while (!NONDEBUG_INSN_P (head))
2460 	head = NEXT_INSN (head);
2461       headptr[ix] = head;
2462       currptr[ix] = head;
2463 
2464       /* Compute the end point and live information.  */
2465       for (j = 1; j < max_match; j++)
2466 	do
2467 	  head = NEXT_INSN (head);
2468 	while (!NONDEBUG_INSN_P (head));
2469       simulate_backwards_to_point (merge_bb, live, head);
2470       IOR_REG_SET (live_union, live);
2471     }
2472 
2473   /* If we're moving across two blocks, verify the validity of the
2474      first move, then adjust the target and let the loop below deal
2475      with the final move.  */
2476   if (final_dest_bb != NULL)
2477     {
2478       rtx_insn *move_upto;
2479 
2480       moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2481 				       jump, e0->dest, live_union,
2482 				       NULL, &move_upto);
2483       if (!moveall)
2484 	{
2485 	  if (move_upto == NULL_RTX)
2486 	    goto out;
2487 
2488 	  while (e0_last_head != move_upto)
2489 	    {
2490 	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2491 					      live_union);
2492 	      e0_last_head = PREV_INSN (e0_last_head);
2493 	    }
2494 	}
2495       if (e0_last_head == NULL_RTX)
2496 	goto out;
2497 
2498       jump = BB_END (final_dest_bb);
2499       cond = get_condition (jump, &move_before, true, false);
2500       if (cond == NULL_RTX)
2501 	{
2502 #ifdef HAVE_cc0
2503 	  if (reg_mentioned_p (cc0_rtx, jump))
2504 	    move_before = prev_nonnote_nondebug_insn (jump);
2505 	  else
2506 #endif
2507 	    move_before = jump;
2508 	}
2509     }
2510 
2511   do
2512     {
2513       rtx_insn *move_upto;
2514       moveall = can_move_insns_across (currptr[0], e0_last_head,
2515 				       move_before, jump, e0->dest, live_union,
2516 				       NULL, &move_upto);
2517       if (!moveall && move_upto == NULL_RTX)
2518 	{
2519 	  if (jump == move_before)
2520 	    break;
2521 
2522 	  /* Try again, using a different insertion point.  */
2523 	  move_before = jump;
2524 
2525 #ifdef HAVE_cc0
2526 	  /* Don't try moving before a cc0 user, as that may invalidate
2527 	     the cc0.  */
2528 	  if (reg_mentioned_p (cc0_rtx, jump))
2529 	    break;
2530 #endif
2531 
2532 	  continue;
2533 	}
2534 
2535       if (final_dest_bb && !moveall)
2536 	/* We haven't checked whether a partial move would be OK for the first
2537 	   move, so we have to fail this case.  */
2538 	break;
2539 
2540       changed = true;
2541       for (;;)
2542 	{
2543 	  if (currptr[0] == move_upto)
2544 	    break;
2545 	  for (ix = 0; ix < nedges; ix++)
2546 	    {
2547 	      rtx_insn *curr = currptr[ix];
2548 	      do
2549 		curr = NEXT_INSN (curr);
2550 	      while (!NONDEBUG_INSN_P (curr));
2551 	      currptr[ix] = curr;
2552 	    }
2553 	}
2554 
2555       /* If we can't currently move all of the identical insns, remember
2556 	 each insn after the range that we'll merge.  */
2557       if (!moveall)
2558 	for (ix = 0; ix < nedges; ix++)
2559 	  {
2560 	    rtx_insn *curr = currptr[ix];
2561 	    do
2562 	      curr = NEXT_INSN (curr);
2563 	    while (!NONDEBUG_INSN_P (curr));
2564 	    nextptr[ix] = curr;
2565 	  }
2566 
2567       reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2568       df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2569       if (final_dest_bb != NULL)
2570 	df_set_bb_dirty (final_dest_bb);
2571       df_set_bb_dirty (bb);
2572       for (ix = 1; ix < nedges; ix++)
2573 	{
2574 	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2575 	  delete_insn_chain (headptr[ix], currptr[ix], false);
2576 	}
2577       if (!moveall)
2578 	{
2579 	  if (jump == move_before)
2580 	    break;
2581 
2582 	  /* For the unmerged insns, try a different insertion point.  */
2583 	  move_before = jump;
2584 
2585 #ifdef HAVE_cc0
2586 	  /* Don't try moving before a cc0 user, as that may invalidate
2587 	     the cc0.  */
2588 	  if (reg_mentioned_p (cc0_rtx, jump))
2589 	    break;
2590 #endif
2591 
2592 	  for (ix = 0; ix < nedges; ix++)
2593 	    currptr[ix] = headptr[ix] = nextptr[ix];
2594 	}
2595     }
2596   while (!moveall);
2597 
2598  out:
2599   free (currptr);
2600   free (headptr);
2601   free (nextptr);
2602 
2603   crossjumps_occured |= changed;
2604 
2605   return changed;
2606 }
2607 
2608 /* Return true if BB contains just a bb note, or a bb note followed
2609    only by DEBUG_INSNs.  */
2610 
2611 static bool
2612 trivially_empty_bb_p (basic_block bb)
2613 {
2614   rtx_insn *insn = BB_END (bb);
2615 
2616   while (1)
2617     {
2618       if (insn == BB_HEAD (bb))
2619 	return true;
2620       if (!DEBUG_INSN_P (insn))
2621 	return false;
2622       insn = PREV_INSN (insn);
2623     }
2624 }
2625 
2626 /* Do simple CFG optimizations - basic block merging, simplification of
2627    jump instructions, etc.  Return nonzero if changes were made.  */
2628 
2629 static bool
2630 try_optimize_cfg (int mode)
2631 {
2632   bool changed_overall = false;
2633   bool changed;
2634   int iterations = 0;
2635   basic_block bb, b, next;
2636 
2637   if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2638     clear_bb_flags ();
2639 
2640   crossjumps_occured = false;
2641 
2642   FOR_EACH_BB_FN (bb, cfun)
2643     update_forwarder_flag (bb);
2644 
2645   if (! targetm.cannot_modify_jumps_p ())
2646     {
2647       first_pass = true;
2648       /* Attempt to merge blocks as made possible by edge removal.  If
2649 	 a block has only one successor, and the successor has only
2650 	 one predecessor, they may be combined.  */
2651       do
2652 	{
2653 	  block_was_dirty = false;
2654 	  changed = false;
2655 	  iterations++;
2656 
2657 	  if (dump_file)
2658 	    fprintf (dump_file,
2659 		     "\n\ntry_optimize_cfg iteration %i\n\n",
2660 		     iterations);
2661 
2662 	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2663 	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2664 	    {
2665 	      basic_block c;
2666 	      edge s;
2667 	      bool changed_here = false;
2668 
2669 	      /* Delete trivially dead basic blocks.  These are either
2670 		 blocks with no predecessors or empty blocks with no
2671 		 successors.  However, if the empty block with no
2672 		 successors is the successor of the ENTRY_BLOCK, it is
2673 		 kept.  This ensures that the ENTRY_BLOCK will have a
2674 		 successor which is a precondition for many RTL
2675 		 passes.  Empty blocks may result from expanding
2676 		 __builtin_unreachable ().  */
2677 	      if (EDGE_COUNT (b->preds) == 0
2678 		  || (EDGE_COUNT (b->succs) == 0
2679 		      && trivially_empty_bb_p (b)
2680 		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2681 		      != b))
2682 		{
2683 		  c = b->prev_bb;
2684 		  if (EDGE_COUNT (b->preds) > 0)
2685 		    {
2686 		      edge e;
2687 		      edge_iterator ei;
2688 
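		      /* B is followed by a barrier; once B is removed, its
			 fallthru predecessors need a barrier of their own so
			 that execution cannot fall off the end of their insn
			 stream.  */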
2689 		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
2690 			{
2691 			  if (BB_FOOTER (b)
2692 			      && BARRIER_P (BB_FOOTER (b)))
2693 			    FOR_EACH_EDGE (e, ei, b->preds)
2694 			      if ((e->flags & EDGE_FALLTHRU)
2695 				  && BB_FOOTER (e->src) == NULL)
2696 				{
2697 				  if (BB_FOOTER (b))
2698 				    {
2699 				      BB_FOOTER (e->src) = BB_FOOTER (b);
2700 				      BB_FOOTER (b) = NULL;
2701 				    }
2702 				  else
2703 				    {
2704 				      start_sequence ();
2705 				      BB_FOOTER (e->src) = emit_barrier ();
2706 				      end_sequence ();
2707 				    }
2708 				}
2709 			}
2710 		      else
2711 			{
2712 			  rtx_insn *last = get_last_bb_insn (b);
2713 			  if (last && BARRIER_P (last))
2714 			    FOR_EACH_EDGE (e, ei, b->preds)
2715 			      if ((e->flags & EDGE_FALLTHRU))
2716 				emit_barrier_after (BB_END (e->src));
2717 			}
2718 		    }
2719 		  delete_basic_block (b);
2720 		  changed = true;
2721 		  /* Avoid trying to remove the exit block.  */
2722 		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2723 		  continue;
2724 		}
2725 
2726 	      /* Remove code labels no longer used.  */
2727 	      if (single_pred_p (b)
2728 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2729 		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2730 		  && LABEL_P (BB_HEAD (b))
2731 		  && !LABEL_PRESERVE_P (BB_HEAD (b))
2732 		  /* If the previous block ends with a branch to this
2733 		     block, we can't delete the label.  Normally this
2734 		     is a condjump that is yet to be simplified, but
2735 		     if CASE_DROPS_THRU, this can be a tablejump with
2736 		     some element going to the same place as the
2737 		     default (fallthru).  */
2738 		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2739 		      || !JUMP_P (BB_END (single_pred (b)))
2740 		      || ! label_is_jump_target_p (BB_HEAD (b),
2741 						   BB_END (single_pred (b)))))
2742 		{
2743 		  delete_insn (BB_HEAD (b));
2744 		  if (dump_file)
2745 		    fprintf (dump_file, "Deleted label in block %i.\n",
2746 			     b->index);
2747 		}
2748 
2749 	      /* If we fall through an empty block, we can remove it.  */
2750 	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2751 		  && single_pred_p (b)
2752 		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2753 		  && !LABEL_P (BB_HEAD (b))
2754 		  && FORWARDER_BLOCK_P (b)
2755 		  /* Note that forwarder_block_p true ensures that
2756 		     there is a successor for this block.  */
2757 		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2758 		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2759 		{
2760 		  if (dump_file)
2761 		    fprintf (dump_file,
2762 			     "Deleting fallthru block %i.\n",
2763 			     b->index);
2764 
2765 		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2766 		       ? b->next_bb : b->prev_bb);
2767 		  redirect_edge_succ_nodup (single_pred_edge (b),
2768 					    single_succ (b));
2769 		  delete_basic_block (b);
2770 		  changed = true;
2771 		  b = c;
2772 		  continue;
2773 		}
2774 
2775 	      /* Merge B with its single successor, if any.  */
2776 	      if (single_succ_p (b)
2777 		  && (s = single_succ_edge (b))
2778 		  && !(s->flags & EDGE_COMPLEX)
2779 		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2780 		  && single_pred_p (c)
2781 		  && b != c)
2782 		{
2783 		  /* When not in cfg_layout mode, use code that is aware of insn
2784 		     reordering.  This code may create new basic blocks, so it
2785 		     does not fit the merge_blocks interface and is kept here in
2786 		     the hope that it will become useless once more of the
2787 		     compiler is converted to use cfg_layout mode.  */
2788 
2789 		  if ((mode & CLEANUP_CFGLAYOUT)
2790 		      && can_merge_blocks_p (b, c))
2791 		    {
2792 		      merge_blocks (b, c);
2793 		      update_forwarder_flag (b);
2794 		      changed_here = true;
2795 		    }
2796 		  else if (!(mode & CLEANUP_CFGLAYOUT)
2797 			   /* If the jump insn has side effects,
2798 			      we can't kill the edge.  */
2799 			   && (!JUMP_P (BB_END (b))
2800 			       || (reload_completed
2801 				   ? simplejump_p (BB_END (b))
2802 				   : (onlyjump_p (BB_END (b))
2803 				      && !tablejump_p (BB_END (b),
2804 						       NULL, NULL))))
2805 			   && (next = merge_blocks_move (s, b, c, mode)))
2806 		      {
2807 			b = next;
2808 			changed_here = true;
2809 		      }
2810 		}
2811 
2812 	      /* Simplify branch over branch.  */
2813 	      if ((mode & CLEANUP_EXPENSIVE)
2814 		   && !(mode & CLEANUP_CFGLAYOUT)
2815 		   && try_simplify_condjump (b))
2816 		changed_here = true;
2817 
2818 	      /* If B has a single outgoing edge, but uses a
2819 		 non-trivial jump instruction without side-effects, we
2820 		 can either delete the jump entirely, or replace it
2821 		 with a simple unconditional jump.  */
2822 	      if (single_succ_p (b)
2823 		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2824 		  && onlyjump_p (BB_END (b))
2825 		  && !CROSSING_JUMP_P (BB_END (b))
2826 		  && try_redirect_by_replacing_jump (single_succ_edge (b),
2827 						     single_succ (b),
2828 						     (mode & CLEANUP_CFGLAYOUT) != 0))
2829 		{
2830 		  update_forwarder_flag (b);
2831 		  changed_here = true;
2832 		}
2833 
2834 	      /* Simplify branch to branch.  */
2835 	      if (try_forward_edges (mode, b))
2836 		{
2837 		  update_forwarder_flag (b);
2838 		  changed_here = true;
2839 		}
2840 
2841 	      /* Look for shared code between blocks.  */
2842 	      if ((mode & CLEANUP_CROSSJUMP)
2843 		  && try_crossjump_bb (mode, b))
2844 		changed_here = true;
2845 
2846 	      if ((mode & CLEANUP_CROSSJUMP)
2847 		  /* This can lengthen register lifetimes.  Do it only after
2848 		     reload.  */
2849 		  && reload_completed
2850 		  && try_head_merge_bb (b))
2851 		changed_here = true;
2852 
2853 	      /* Don't get confused by the index shift caused by
2854 		 deleting blocks.  */
2855 	      if (!changed_here)
2856 		b = b->next_bb;
2857 	      else
2858 		changed = true;
2859 	    }
2860 
2861 	  if ((mode & CLEANUP_CROSSJUMP)
2862 	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2863 	    changed = true;
2864 
2865 	  if (block_was_dirty)
2866 	    {
2867 	      /* This should only be set by head-merging.  */
2868 	      gcc_assert (mode & CLEANUP_CROSSJUMP);
2869 	      df_analyze ();
2870 	    }
2871 
2872 	  if (changed)
2873             {
2874               /* Edge forwarding in particular can cause hot blocks previously
2875                  reached by both hot and cold blocks to become dominated only
2876                  by cold blocks.  This will cause the verification below to fail,
2877                  and lead to now-cold code in the hot section.  This is not easy
2878                  to detect and fix during edge forwarding, and in some cases
2879                  is only visible after newly unreachable blocks are deleted,
2880                  which will be done in fixup_partitions.  */
2881               fixup_partitions ();
2882 
2883 #ifdef ENABLE_CHECKING
2884               verify_flow_info ();
2885 #endif
2886             }
2887 
2888 	  changed_overall |= changed;
2889 	  first_pass = false;
2890 	}
2891       while (changed);
2892     }
2893 
2894   FOR_ALL_BB_FN (b, cfun)
2895     b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2896 
2897   return changed_overall;
2898 }
2899 
2900 /* Delete all unreachable basic blocks.  */
2901 
2902 bool
2903 delete_unreachable_blocks (void)
2904 {
2905   bool changed = false;
2906   basic_block b, prev_bb;
2907 
2908   find_unreachable_blocks ();
2909 
2910   /* When we're in GIMPLE mode and there may be debug insns, we should
2911      delete blocks in reverse dominator order, so as to get a chance
2912      to substitute all released DEFs into debug stmts.  If we don't
2913      have dominators information, walking blocks backward gets us a
2914      better chance of retaining most debug information than
2915      otherwise.  */
2916   if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
2917       && dom_info_available_p (CDI_DOMINATORS))
2918     {
2919       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2920 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2921 	{
2922 	  prev_bb = b->prev_bb;
2923 
2924 	  if (!(b->flags & BB_REACHABLE))
2925 	    {
2926 	      /* Speed up the removal of blocks that don't dominate
2927 		 others.  Walking backwards, this should be the common
2928 		 case.  */
2929 	      if (!first_dom_son (CDI_DOMINATORS, b))
2930 		delete_basic_block (b);
2931 	      else
2932 		{
2933 		  vec<basic_block> h
2934 		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
2935 
2936 		  while (h.length ())
2937 		    {
2938 		      b = h.pop ();
2939 
2940 		      prev_bb = b->prev_bb;
2941 
2942 		      gcc_assert (!(b->flags & BB_REACHABLE));
2943 
2944 		      delete_basic_block (b);
2945 		    }
2946 
2947 		  h.release ();
2948 		}
2949 
2950 	      changed = true;
2951 	    }
2952 	}
2953     }
2954   else
2955     {
2956       for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2957 	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2958 	{
2959 	  prev_bb = b->prev_bb;
2960 
2961 	  if (!(b->flags & BB_REACHABLE))
2962 	    {
2963 	      delete_basic_block (b);
2964 	      changed = true;
2965 	    }
2966 	}
2967     }
2968 
2969   if (changed)
2970     tidy_fallthru_edges ();
2971   return changed;
2972 }
2973 
2974 /* Delete any jump tables never referenced.  We can't delete them at the
2975    time of removing the tablejump insn, as they are referenced by the
2976    preceding insns computing the destination, so we delay deleting them and
2977    garbage-collect them once life information is computed.  */
2978 void
2979 delete_dead_jumptables (void)
2980 {
2981   basic_block bb;
2982 
2983   /* A dead jump table does not belong to any basic block.  Scan insns
2984      between two adjacent basic blocks.  */
2985   FOR_EACH_BB_FN (bb, cfun)
2986     {
2987       rtx_insn *insn, *next;
2988 
2989       for (insn = NEXT_INSN (BB_END (bb));
2990 	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2991 	   insn = next)
2992 	{
2993 	  next = NEXT_INSN (insn);
2994 	  if (LABEL_P (insn)
2995 	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2996 	      && JUMP_TABLE_DATA_P (next))
2997 	    {
2998 	      rtx_insn *label = insn, *jump = next;
2999 
3000 	      if (dump_file)
3001 		fprintf (dump_file, "Dead jumptable %i removed\n",
3002 			 INSN_UID (insn));
3003 
3004 	      next = NEXT_INSN (next);
3005 	      delete_insn (jump);
3006 	      delete_insn (label);
3007 	    }
3008 	}
3009     }
3010 }
3011 
3012 
3013 /* Tidy the CFG by deleting unreachable code and whatnot.  */
3014 
3015 bool
3016 cleanup_cfg (int mode)
3017 {
3018   bool changed = false;
3019 
3020   /* Set the cfglayout mode flag here.  We could update all the callers
3021      but that is just inconvenient, especially given that we eventually
3022      want to have cfglayout mode as the default.  */
3023   if (current_ir_type () == IR_RTL_CFGLAYOUT)
3024     mode |= CLEANUP_CFGLAYOUT;
3025 
3026   timevar_push (TV_CLEANUP_CFG);
3027   if (delete_unreachable_blocks ())
3028     {
3029       changed = true;
3030       /* We've possibly created trivially dead code.  Clean it up right
3031 	 now to introduce more opportunities for try_optimize_cfg.  */
3032       if (!(mode & (CLEANUP_NO_INSN_DEL))
3033 	  && !reload_completed)
3034 	delete_trivially_dead_insns (get_insns (), max_reg_num ());
3035     }
3036 
3037   compact_blocks ();
3038 
3039   /* To tail-merge blocks ending in the same noreturn function (e.g.
3040      a call to abort) we have to insert fake edges to exit.  Do this
3041      here once.  The fake edges do not interfere with any other CFG
3042      cleanups.  */
3043   if (mode & CLEANUP_CROSSJUMP)
3044     add_noreturn_fake_exit_edges ();
3045 
3046   if (!dbg_cnt (cfg_cleanup))
3047     return changed;
3048 
3049   while (try_optimize_cfg (mode))
3050     {
3051       delete_unreachable_blocks (), changed = true;
3052       if (!(mode & CLEANUP_NO_INSN_DEL))
3053 	{
3054 	  /* Try to remove some trivially dead insns when doing an expensive
3055 	     cleanup.  But delete_trivially_dead_insns doesn't work after
3056 	     reload (it only handles pseudos) and run_fast_dce is too costly
3057 	     to run in every iteration.
3058 
3059 	     For effective cross jumping, we really want to run a fast DCE to
3060 	     clean up any dead conditions, or they get in the way of performing
3061 	     useful tail merges.
3062 
3063 	     Other transformations in cleanup_cfg are not so sensitive to dead
3064 	     code, so delete_trivially_dead_insns or even doing nothing at all
3065 	     is good enough.  */
3066 	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3067 	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3068 	    break;
3069 	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
3070 	    run_fast_dce ();
3071 	}
3072       else
3073 	break;
3074     }
3075 
3076   if (mode & CLEANUP_CROSSJUMP)
3077     remove_fake_exit_edges ();
3078 
3079   /* Don't call delete_dead_jumptables in cfglayout mode, because
3080      that function assumes that jump tables are in the insns stream.
3081      But we also don't _have_ to delete dead jumptables in cfglayout
3082      mode because we shouldn't even be looking at things that are
3083      not in a basic block.  Dead jumptables are cleaned up when
3084      going out of cfglayout mode.  */
3085   if (!(mode & CLEANUP_CFGLAYOUT))
3086     delete_dead_jumptables ();
3087 
3088   /* ???  We probably do this way too often.  */
3089   if (current_loops
3090       && (changed
3091 	  || (mode & CLEANUP_CFG_CHANGED)))
3092     {
3093       timevar_push (TV_REPAIR_LOOPS);
3094       /* The above doesn't preserve dominance info if available.  */
3095       gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3096       calculate_dominance_info (CDI_DOMINATORS);
3097       fix_loop_structure (NULL);
3098       free_dominance_info (CDI_DOMINATORS);
3099       timevar_pop (TV_REPAIR_LOOPS);
3100     }
3101 
3102   timevar_pop (TV_CLEANUP_CFG);
3103 
3104   return changed;
3105 }
3106 
3107 namespace {
3108 
3109 const pass_data pass_data_jump =
3110 {
3111   RTL_PASS, /* type */
3112   "jump", /* name */
3113   OPTGROUP_NONE, /* optinfo_flags */
3114   TV_JUMP, /* tv_id */
3115   0, /* properties_required */
3116   0, /* properties_provided */
3117   0, /* properties_destroyed */
3118   0, /* todo_flags_start */
3119   0, /* todo_flags_finish */
3120 };
3121 
3122 class pass_jump : public rtl_opt_pass
3123 {
3124 public:
3125   pass_jump (gcc::context *ctxt)
3126     : rtl_opt_pass (pass_data_jump, ctxt)
3127   {}
3128 
3129   /* opt_pass methods: */
3130   virtual unsigned int execute (function *);
3131 
3132 }; // class pass_jump
3133 
3134 unsigned int
3135 pass_jump::execute (function *)
3136 {
3137   delete_trivially_dead_insns (get_insns (), max_reg_num ());
3138   if (dump_file)
3139     dump_flow_info (dump_file, dump_flags);
3140   cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3141 	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3142   return 0;
3143 }
3144 
3145 } // anon namespace
3146 
3147 rtl_opt_pass *
3148 make_pass_jump (gcc::context *ctxt)
3149 {
3150   return new pass_jump (ctxt);
3151 }
3152 
3153 namespace {
3154 
3155 const pass_data pass_data_jump2 =
3156 {
3157   RTL_PASS, /* type */
3158   "jump2", /* name */
3159   OPTGROUP_NONE, /* optinfo_flags */
3160   TV_JUMP, /* tv_id */
3161   0, /* properties_required */
3162   0, /* properties_provided */
3163   0, /* properties_destroyed */
3164   0, /* todo_flags_start */
3165   0, /* todo_flags_finish */
3166 };
3167 
3168 class pass_jump2 : public rtl_opt_pass
3169 {
3170 public:
3171   pass_jump2 (gcc::context *ctxt)
3172     : rtl_opt_pass (pass_data_jump2, ctxt)
3173   {}
3174 
3175   /* opt_pass methods: */
3176   virtual unsigned int execute (function *)
3177     {
3178       cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3179       return 0;
3180     }
3181 
3182 }; // class pass_jump2
3183 
3184 } // anon namespace
3185 
3186 rtl_opt_pass *
3187 make_pass_jump2 (gcc::context *ctxt)
3188 {
3189   return new pass_jump2 (ctxt);
3190 }
3191