/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
#include "cfgloop.h"
#include "dbgcnt.h"
#include "tree-cfg.h"
#include "tree-vectorizer.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */
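
/* As a small illustrative sketch of the transformation above (the block
   and SSA names here are hypothetical, not from any particular testcase):
   suppose B ends in

       if (x_1 == 0) goto C; else goto D;

   and along the edge A->B we know x_1 == 0.  Then B' is a copy of B whose
   conditional and D-bound edge have been removed, A is redirected to B',
   and B'->C is B's lone remaining successor edge, so control reaches C
   unconditionally while still executing the side effects copied from B.  */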


/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */
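
/* Purely as an illustration of that layout: if incoming edges E1 and E2
   both thread to the same outgoing edge, the hash table ends up with a
   single entry for that outgoing edge whose list of incoming edges holds
   E1 and E2 (see struct el and struct redirection_data below).  */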

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};

/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
       fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
       fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}

/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}

/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}
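
/* Note that the comparison above begins at index 1: element zero of a
   path is the incoming edge, which is expected to differ among edges
   that thread along the same remainder of a path, so it is deliberately
   excluded from the equality (and hash) computation.  */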

/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;

/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* When we have multiple paths through a joiner which reach different
     final destinations, then we may need to correct for potential
     profile insanities.  */
  bool need_profile_correction;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	{
	  free_dom_edge_info (e);
	  remove_edge (e);
	}
      else
	{
	  e->probability = REG_BR_PROB_BASE;
	  e->count = bb->count;
	  ei_next (&ei);
	}
    }

  /* If the remaining edge is a loop exit, there must have been
     a removed edge that was not a loop exit.

     In that case BB and possibly other blocks were previously
     in the loop, but are now outside the loop.  Thus, we need
     to update the loop structures.  */
  if (single_succ_p (bb)
      && loop_outer (bb->loop_father)
      && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
    loops_state_set (LOOPS_NEED_FIXUP);
}

/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->frequency = 0;
  rd->dup_blocks[count]->count = 0;
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;

/* Given an outgoing edge E, lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
	 to the list of incoming edges associated with E.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}

/* Similar to copy_phi_args, except that the PHI arg already exists;
   it just does not have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in e->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
			   gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      source_location locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}

/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has a constant value in a flow sensitive manner.  Set
   LOCUS to the location of the constant phi arg and return the value.
   Return DEF directly if either PATH or IDX is zero.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, source_location *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
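
/* A hypothetical example of the backtracking above: if DEF is x_3,
   defined by "x_3 = PHI <0(B2), y_5(B4)>" in DEF_BB, and some earlier
   path edge (*path)[j] enters DEF_BB from B2, then along this threading
   path x_3 has the constant value 0, so 0 is returned and LOCUS is set
   to the location of that PHI argument.  */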

/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has a constant value; if so, copy the constant value instead of the
   argument itself.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      source_location locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}

/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has constant
   value in a flow sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the node in the jump threading path at
   which we start checking whether the new PHI argument has a
   constant value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */
  e->aux = NULL;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}

/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }
  return false;
}


/* Compute the amount of profile count/frequency coming into the jump threading
   path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
   PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if path contains
   a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts/frequency
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go offpath at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go offpath at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

		A     B     C
		 \    |    /
	       Ea \   |Eb / Ec
		   \  |  /
		    v v v
		      J       <-- Joiner
		     / \
		Eoff/   \Eon
		   /     \
		  v       v
		Soff     Son  <--- Normal
			 /\
		      Ed/  \ Ee
		       /    \
		      v     v
		      D      E

	    Jump threading paths: A -> J -> Son -> D (path 1)
				  C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
   Ea could represent multiple incoming blocks/edges that are included in
   path 1.
   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
   before or after the "normal" copy block).  These are not duplicated onto
   the jump threading path, as they are single-successor.
   - Any of the blocks along the path may have other incoming edges that
   are not part of any jump threading path, but add profile counts along
   the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

		A	   B	       C
		|	   |	       |
	      Ea|	   |Eb	       |Ec
		|	   |	       |
		v	   v	       v
	       Ja	   J	      Jc
	       / \	  / \Eon'     / \
	  Eona/   \   ---/---\--------   \Eonc
	     /     \ /  /     \		  \
	    v       v  v       v	  v
	   Sona     Soff      Son	Sonc
	     \		       /\	  /
	      \___________    /  \  _____/
			  \  /    \/
			   vv      v
			    D      E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remain Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping track
   of all counts coming into the original path that are not in a jump
   thread (Eb in the above example, but as noted earlier, there could
   be other predecessors incoming to the path at various points, such
   as at Son).  Call this cumulative non-path count coming into the path
   before D Enonpath.  We then ensure that the count from Sona->D is at
   least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
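
/* A small worked example of that final correction (the numbers are
   hypothetical): if Ed, i.e. elast->count, is 100 and the cumulative
   non-path count Enonpath is 30, then the duplicated path must supply
   at least 100 - 30 = 70, so a path_out_count estimated at, say, 50
   would be raised to 70, though never above the minimum count seen
   along the path being duplicated.  */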

static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     gcov_type *path_in_count_ptr,
		     gcov_type *path_out_count_ptr,
		     int *path_in_freq_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type nonpath_count = 0;
  bool has_joiner = false;
  gcov_type path_in_count = 0;
  int path_in_freq = 0;

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple buckets:
	path_in_count: total count of incoming edges that flow into the
		  current path.
	nonpath_count: total count of incoming edges that are not
		  flowing along *any* path.  These are the counts
		  that will still flow along the original path after
		  all path duplication is done by potentially multiple
		  calls to this routine.
     (any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)
     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count;
	  path_in_freq += EDGE_FREQUENCY (ein);
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any jump-threading
	     path.  These counts will still flow out of original path after all
	     jump threading is complete.  */
	    nonpath_count += ein->count;
	}
    }

  /* This is needed due to insane incoming frequencies.  */
  if (path_in_freq > BB_FREQ_MAX)
    path_in_freq = BB_FREQ_MAX;

  BITMAP_FREE (in_edge_srcs);

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  gcov_type total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path
     bb that was from the threading paths (computed above in onpath_scale).
     Afterwards, we will need to do some fixup to account for other threading
     paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  gcov_type path_out_count = path_in_count;
  gcov_type min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      gcov_type cur_count = epath->count;
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = apply_probability (cur_count, onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count;
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count < min_path_count)
	min_path_count = epath->count;
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (local_info->need_profile_correction
      && has_joiner && path_out_count < elast->count - nonpath_count)
    {
      path_out_count = elast->count - nonpath_count;
      /* But neither can we go above the minimum count along the path
	 we are duplicating.  This can be an issue due to profile
	 insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  *path_in_freq_ptr = path_in_freq;
  return has_joiner;
}


/* Update the counts and frequencies for both an original path
   edge EPATH and its duplicate EDUP.  The duplicate source block
   will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
   and the duplicate edge EDUP will have a count of PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, gcov_type path_in_count,
		gcov_type path_out_count, int path_in_freq)
{

  /* First update the duplicated block's count / frequency.  */
  if (edup)
    {
      basic_block dup_block = edup->src;
      gcc_assert (dup_block->count == 0);
      gcc_assert (dup_block->frequency == 0);
      dup_block->count = path_in_count;
      dup_block->frequency = path_in_freq;
    }

  /* Now update the original block's count and frequency in the
     opposite manner - remove the counts/freq that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;
  if (epath->src->count < 0)
    epath->src->count = 0;
  epath->src->frequency -= path_in_freq;
  if (epath->src->frequency < 0)
    epath->src->frequency = 0;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */
  if (edup)
    edup->count = path_out_count;
  epath->count -= path_out_count;
  gcc_assert (epath->count >= 0);
}
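
/* A small numeric sketch of the above (hypothetical values): on a
   non-joiner path with path_in_count == path_out_count == 40, the
   duplicate source block receives count 40 while epath->src->count and
   epath->count each drop by 40, with the underflow clamps absorbing any
   precision/rounding slop.  */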


/* The duplicate and original joiner blocks may end up with different
   probabilities (different from both the original and from each other).
   Recompute the probabilities here once we have updated the edge
   counts and frequencies.  */

static void
recompute_probabilities (basic_block bb)
{
  edge esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (esucc, ei, bb->succs)
    {
      if (!bb->count)
	continue;

      /* Prevent overflow computation due to insane profiles.  */
      if (esucc->count < bb->count)
	esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
						 bb->count);
      else
	/* Can happen with missing/guessed probabilities, since we
	   may determine that more is flowing along duplicated
	   path than joiner succ probabilities allowed.
	   Counts and freqs will be insane after jump threading,
	   at least make sure probability is sane or we will
	   get a flow verification error.
	   Not much we can do to make counts/freqs sane without
	   redoing the profile estimation.  */
	esucc->probability = REG_BR_PROB_BASE;
    }
}


/* Update the counts of the original and duplicated edges from a joiner
   that go off path, given that we have already determined that the
   duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
   outgoing count along the path PATH_OUT_COUNT.  The original (on-)path
   edge from joiner is EPATH.  */

static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
			      gcov_type path_in_count,
			      gcov_type path_out_count)
{
  /* Compute the count that currently flows off path from the joiner.
     In other words, the total count of joiner's out edges other than
     epath.  Compute this by walking the successors instead of
     subtracting epath's count from the joiner bb count, since there
     are sometimes slight insanities where the total out edge count is
     larger than the bb count (possibly due to rounding/truncation
     errors).  */
  gcov_type total_orig_off_path_count = 0;
  edge enonpath;
  edge_iterator ei;
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      if (enonpath == epath)
	continue;
      total_orig_off_path_count += enonpath->count;
    }

  /* For the path that we are duplicating, the amount that will flow
     off path from the duplicated joiner is the delta between the
     path's cumulative in count and the portion of that count we
     estimated above as flowing from the joiner along the duplicated
     path.  */
  gcov_type total_dup_off_path_count = path_in_count - path_out_count;

  /* Now do the actual updates of the off-path edges.  */
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      /* Look for edges going off of the threading path.  */
      if (enonpath == epath)
	continue;

      /* Find the corresponding edge out of the duplicated joiner.  */
      edge enonpathdup = find_edge (dup_bb, enonpath->dest);
      gcc_assert (enonpathdup);

      /* We can't use the original probability of the joiner's out
	 edges, since the probabilities of the original branch
	 and the duplicated branches may vary after all threading is
	 complete.  But apportion the duplicated joiner's off-path
	 total edge count computed earlier (total_dup_off_path_count)
	 among the duplicated off-path edges based on their original
	 ratio to the full off-path count (total_orig_off_path_count).
	 */
      int scale = GCOV_COMPUTE_SCALE (enonpath->count,
				      total_orig_off_path_count);
      /* Give the duplicated offpath edge a portion of the duplicated
	 total.  */
      enonpathdup->count = apply_scale (scale,
					total_dup_off_path_count);
      /* Now update the original offpath edge count, handling underflow
	 due to rounding errors.  */
      enonpath->count -= enonpathdup->count;
      if (enonpath->count < 0)
	enonpath->count = 0;
    }
}


/* Check if the paths through RD all have estimated frequencies but zero
   profile counts.  This is more accurate than checking the entry block
   for a zero profile count, since profile insanities sometimes creep in.  */

static bool
estimated_freqs_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  bool non_zero_freq = false;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      if (ein->count)
	return false;
      non_zero_freq |= ein->src->frequency != 0;
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      if (epath->src->count)
	return false;
      non_zero_freq |= epath->src->frequency != 0;
      edge esucc;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	{
	  if (esucc->count)
	    return false;
	  non_zero_freq |= esucc->src->frequency != 0;
	}
    }
  return non_zero_freq;
}


/* Invoked for routines that have guessed frequencies and no profile
   counts to record the block and edge frequencies for paths through RD
   in the profile count fields of those blocks and edges.  This is because
   ssa_fix_duplicate_block_edges incrementally updates the block and
   edge counts as edges are redirected, and it is difficult to do that
   for edge frequencies which are computed on the fly from the source
   block frequency and probability.  When a block frequency is updated
   its outgoing edge frequencies are affected and become difficult to
   adjust.  */

static void
freqs_to_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the probability when the frequencies are very
	 small.  */
      ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
				      ein->probability);
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      edge esucc;
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the edge probability when the frequencies are very
	 small.  */
      epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = apply_probability (esucc->src->count,
					  esucc->probability);
    }
}
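
/* As a hypothetical numeric example of that scaling: with
   REG_BR_PROB_BASE of 10000, a source block frequency of 3 and an edge
   probability of 5000 (50%), the edge count becomes
   apply_probability (3 * 10000, 5000) == 15000, which loses far less
   to rounding than applying 50% directly to the tiny frequency 3.  */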


/* For routines that have guessed frequencies and no profile counts, where we
   used freqs_to_counts_path to record block and edge frequencies for paths
   through RD, we clear the counts after completing all updates for RD.
   The updates in ssa_fix_duplicate_block_edges are based off the count fields,
   but the block frequencies and edge probabilities were updated as well,
   so we can simply clear the count fields.  */

static void
clear_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein, esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    ein->count = 0;

  /* First clear counts along original path.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = 0;
      epath->src->count = 0;
    }
  /* Also need to clear the counts along duplicated path.  */
  for (unsigned int i = 0; i < 2; i++)
    {
      basic_block dup = rd->dup_blocks[i];
      if (!dup)
	continue;
      FOR_EACH_EDGE (esucc, ei, dup->succs)
	esucc->count = 0;
      dup->count = 0;
    }
}

/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type path_in_count = 0;
  gcov_type path_out_count = 0;
  int path_in_freq = 0;

  /* This routine updates profile counts, frequencies, and probabilities
     incrementally. Since it is difficult to do the incremental updates
     using frequencies/probabilities alone, for routines without profile
     data we first take a snapshot of the existing block and edge frequencies
     by copying them into the empty profile count fields.  These counts are
     then used to do the incremental updates, and cleared at the end of this
     routine.  If the function is marked as having a profile, we still check
     to see if the paths through RD are using estimated frequencies because
     the routine had zero profile counts.  */
  bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
			     || estimated_freqs_path (rd));
  if (do_freqs_to_counts)
    freqs_to_counts_path (rd);

  /* First determine how much profile count to move from original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count,
					 &path_in_freq);

  int cur_path_freq = path_in_freq;
  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count and frequency are the totals for all edges
	     incoming to this jump threading path computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count,
			  path_in_freq);

	  /* Next we need to update the counts of the original and duplicated
	     edges from the joiner that go off path.  */
	  update_joiner_offpath_counts (epath, e2->src, path_in_count,
					path_out_count);

	  /* Finally, we need to set the probabilities on the duplicated
	     edges out of the duplicated joiner (e2->src).  The probabilities
	     along the original path will all be updated below after we finish
	     processing the whole path.  */
	  recompute_probabilities (e2->src);

	  /* Record the frequency flowing to the downstream duplicated
	     path blocks.  */
	  cur_path_freq = EDGE_FREQUENCY (e2);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count,
			  cur_path_freq);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts/freqs that were moved
	     to the duplicated path from the counts/freqs flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	   update_profile (epath, NULL, path_out_count, path_out_count,
			   cur_path_freq);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  count++;
	}
    }

  /* Now walk orig blocks and update their probabilities, since the
     counts and freqs should be updated properly by above loop.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      recompute_probabilities (epath->src);
    }

  /* Done with all profile and frequency updates, clear counts if they
     were copied.  */
  if (do_freqs_to_counts)
    clear_counts_path (rd);
}

/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* The second duplicated block in a jump threading path is specific
     to the path.  So it gets stored in RD rather than in LOCAL_DATA.

     Each time we're called, we have to look through the path and see
     if a second block needs to be duplicated.

     Note the search starts with the third edge on the path.  The first
     edge is the incoming edge, the second edge always has its source
     duplicated.  Thus we start our search with the third edge.  */
  vec<jump_thread_edge *> *path = rd->path;
  for (unsigned int i = 2; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
				      &local_info->duplicate_blocks);
	  break;
	}
    }

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading ((*path)[1]->e->src, rd, 0,
				  &local_info->duplicate_blocks);
      local_info->template_block = rd->dup_blocks[0];

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd, 0,
				  &local_info->duplicate_blocks);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.   */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block, halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
1381   if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
1382     {
1383       ssa_fix_duplicate_block_edges (rd, local_info);
1384       return 0;
1385     }
1386 
1387   return 1;
1388 }
1389 
1390 /* Hash table traversal callback to redirect each incoming edge
1391    associated with this hash table element to its new destination.  */
1392 
1393 int
1394 ssa_redirect_edges (struct redirection_data **slot,
1395 		    ssa_local_info_t *local_info)
1396 {
1397   struct redirection_data *rd = *slot;
1398   struct el *next, *el;
1399 
1400   /* Walk over all the incoming edges associated with this hash table
1401      entry.  */
1402   for (el = rd->incoming_edges; el; el = next)
1403     {
1404       edge e = el->e;
1405       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1406 
1407       /* Go ahead and free this element from the list.  Doing this now
1408 	 avoids the need for another list walk when we destroy the hash
1409 	 table.  */
1410       next = el->next;
1411       free (el);
1412 
1413       thread_stats.num_threaded_edges++;
1414 
1415       if (rd->dup_blocks[0])
1416 	{
1417 	  edge e2;
1418 
1419 	  if (dump_file && (dump_flags & TDF_DETAILS))
1420 	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
1421 		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);
1422 
1423 	  /* Redirect the incoming edge (possibly to the joiner block) to the
1424 	     appropriate duplicate block.  */
1425 	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
1426 	  gcc_assert (e == e2);
1427 	  flush_pending_stmts (e2);
1428 	}
1429 
1430       /* Go ahead and clear E->aux.  It's not needed anymore and failure
1431 	 to clear it will cause all kinds of unpleasant problems later.  */
1432       delete_jump_thread_path (path);
1433       e->aux = NULL;
1435     }
1436 
1437   /* Indicate that we actually threaded one or more jumps.  */
1438   if (rd->incoming_edges)
1439     local_info->jumps_threaded = true;
1440 
1441   return 1;
1442 }
1443 
1444 /* Return true if this block has no executable statements other than
1445    a simple control flow instruction.  When the number of outgoing edges
1446    is one, this is equivalent to a "forwarder" block.  */
1447 
1448 static bool
1449 redirection_block_p (basic_block bb)
1450 {
1451   gimple_stmt_iterator gsi;
1452 
1453   /* Advance to the first executable statement.  */
1454   gsi = gsi_start_bb (bb);
1455   while (!gsi_end_p (gsi)
1456 	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
1457 	     || is_gimple_debug (gsi_stmt (gsi))
1458 	     || gimple_nop_p (gsi_stmt (gsi))
1459 	     || gimple_clobber_p (gsi_stmt (gsi))))
1460     gsi_next (&gsi);
1461 
1462   /* Check if this is an empty block.  */
1463   if (gsi_end_p (gsi))
1464     return true;
1465 
1466   /* Test that we've reached the terminating control statement.  */
1467   return gsi_stmt (gsi)
1468 	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1469 	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1470 	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
1471 }
1472 
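/* For instance, redirection_block_p holds for a (hypothetical) block
   consisting only of

     <L1>:
     if (x_1 > 0) goto <bb 4>; else goto <bb 5>;

   since labels, debug statements, nops and clobbers are skipped and the
   block then ends in its control statement.  An entirely empty block
   qualifies as well.  */
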
1473 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1474    is reached via one or more specific incoming edges, we know which
1475    outgoing edge from BB will be traversed.
1476 
1477    We want to redirect those incoming edges to the target of the
1478    appropriate outgoing edge.  Doing so avoids a conditional branch
1479    and may expose new optimization opportunities.  Note that we have
1480    to update dominator tree and SSA graph after such changes.
1481 
1482    The key to keeping the SSA graph update manageable is to duplicate
1483    the side effects occurring in BB so that those side effects still
1484    occur on the paths which bypass BB after redirecting edges.
1485 
1486    We accomplish this by creating duplicates of BB and arranging for
1487    the duplicates to unconditionally pass control to one specific
1488    successor of BB.  We then revector the incoming edges into BB to
1489    the appropriate duplicate of BB.
1490 
1491    If NOLOOP_ONLY is true, we only perform the threading as long as it
1492    does not affect the structure of the loops in a nontrivial way.
1493 
1494    If JOINERS is true, then thread through joiner blocks as well.  */
1495 
1496 static bool
1497 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
1498 {
1499   /* E is an incoming edge into BB that we may or may not want to
1500      redirect to a duplicate of BB.  */
1501   edge e, e2;
1502   edge_iterator ei;
1503   ssa_local_info_t local_info;
1504 
1505   local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1506   local_info.need_profile_correction = false;
1507 
1508   /* To avoid scanning a linear array for the element we need, we instead
1509      use a hash table.  For normal code there should be no noticeable
1510      difference.  However, if we have a block with a large number of
1511      incoming and outgoing edges, such linear searches can get expensive.  */
1512   redirection_data
1513     = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1514 
1515   /* Record each unique threaded destination into a hash table for
1516      efficient lookups.  */
1517   edge last = NULL;
1518   FOR_EACH_EDGE (e, ei, bb->preds)
1519     {
1520       if (e->aux == NULL)
1521 	continue;
1522 
1523       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1524 
1525       if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1526 	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1527 	continue;
1528 
1529       e2 = path->last ()->e;
1530       if (!e2 || noloop_only)
1531 	{
1532 	  /* If NOLOOP_ONLY is true, we only allow threading through the
1533 	     header of a loop to exit edges.  */
1534 
1535 	  /* One case occurs when there was a loop header buried in a jump
1536 	     threading path that crosses loop boundaries.  We do not try
1537 	     to thread this elsewhere, so just cancel the jump threading
1538 	     request by clearing the AUX field now.  */
1539 	  if (bb->loop_father != e2->src->loop_father
1540 	      && !loop_exit_edge_p (e2->src->loop_father, e2))
1541 	    {
1542 	      /* Since this case is not handled by our special code
1543 		 to thread through a loop header, we must explicitly
1544 		 cancel the threading request here.  */
1545 	      delete_jump_thread_path (path);
1546 	      e->aux = NULL;
1547 	      continue;
1548 	    }
1549 
1550 	  /* Another case occurs when trying to thread through our
1551 	     own loop header, possibly from inside the loop.  We will
1552 	     thread these later.  */
1553 	  unsigned int i;
1554 	  for (i = 1; i < path->length (); i++)
1555 	    {
1556 	      if ((*path)[i]->e->src == bb->loop_father->header
1557 		  && (!loop_exit_edge_p (bb->loop_father, e2)
1558 		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1559 		break;
1560 	    }
1561 
1562 	  if (i != path->length ())
1563 	    continue;
1564 	}
1565 
1566       /* Insert the outgoing edge into the hash table if it is not
1567 	 already in the hash table.  */
1568       lookup_redirection_data (e, INSERT);
1569 
1570       /* When we have jump thread paths through a common joiner with
1571 	 different final destinations, we may need corrections to deal with
1572 	 profile insanities.  See the big comment before compute_path_counts.  */
1573       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1574 	{
1575 	  if (!last)
1576 	    last = e2;
1577 	  else if (e2 != last)
1578 	    local_info.need_profile_correction = true;
1579 	}
1580     }
1581 
1582   /* We do not update dominance info.  */
1583   free_dominance_info (CDI_DOMINATORS);
1584 
1585   /* We know we only thread through the loop header to loop exits.
1586      Let the basic block duplication hook know we are not creating
1587      a multiple entry loop.  */
1588   if (noloop_only
1589       && bb == bb->loop_father->header)
1590     set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1591 
1592   /* Now create duplicates of BB.
1593 
1594      Note that for a block with a high outgoing degree we can waste
1595      a lot of time and memory creating and destroying useless edges.
1596 
1597      So we first duplicate BB and remove the control structure at the
1598      tail of the duplicate as well as all outgoing edges from the
1599      duplicate.  We then use that duplicate block as a template for
1600      the rest of the duplicates.  */
1601   local_info.template_block = NULL;
1602   local_info.bb = bb;
1603   local_info.jumps_threaded = false;
1604   redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1605 			    (&local_info);
1606 
1607   /* The template does not have an outgoing edge.  Create that outgoing
1608      edge and update PHI nodes at the edge's target as necessary.
1609 
1610      We do this after creating all the duplicates to avoid creating
1611      unnecessary edges.  */
1612   redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1613 			    (&local_info);
1614 
1615   /* The hash table traversals above created the duplicate blocks (and the
1616      statements within the duplicate blocks).  This loop creates PHI nodes for
1617      the duplicated blocks and redirects the incoming edges into BB to reach
1618      the duplicates of BB.  */
1619   redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1620 			    (&local_info);
1621 
1622   /* Done with this block.  Clear REDIRECTION_DATA.  */
1623   delete redirection_data;
1624   redirection_data = NULL;
1625 
1626   if (noloop_only
1627       && bb == bb->loop_father->header)
1628     set_loop_copy (bb->loop_father, NULL);
1629 
1630   BITMAP_FREE (local_info.duplicate_blocks);
1631   local_info.duplicate_blocks = NULL;
1632 
1633   /* Indicate to our caller whether or not any jumps were threaded.  */
1634   return local_info.jumps_threaded;
1635 }
1636 
1637 /* Wrapper for thread_block_1 so that we can first handle jump
1638    thread paths which do not involve copying joiner blocks, then
1639    handle jump thread paths which have joiner blocks.
1640 
1641    By doing things this way we can be as aggressive as possible and
1642    not worry that copying a joiner block will create a jump threading
1643    opportunity.  */
1644 
1645 static bool
1646 thread_block (basic_block bb, bool noloop_only)
1647 {
1648   bool retval;
1649   retval = thread_block_1 (bb, noloop_only, false);
1650   retval |= thread_block_1 (bb, noloop_only, true);
1651   return retval;
1652 }
1653 
1654 /* Callback for dfs_enumerate_from.  Returns true if BB is different
1655    from STOP and DBDS_CE_STOP.  */
1656 
1657 static basic_block dbds_ce_stop;
1658 static bool
1659 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1660 {
1661   return (bb != (const_basic_block) stop
1662 	  && bb != dbds_ce_stop);
1663 }
1664 
1665 /* Evaluates the dominance relationship between BB and the latch of LOOP,
1666    and returns the resulting state.  */
1667 
1668 enum bb_dom_status
1669 determine_bb_domination_status (struct loop *loop, basic_block bb)
1670 {
1671   basic_block *bblocks;
1672   unsigned nblocks, i;
1673   bool bb_reachable = false;
1674   edge_iterator ei;
1675   edge e;
1676 
1677   /* This function assumes BB is a successor of LOOP->header.
1678      If that is not the case, return DOMST_NONDOMINATING, which
1679      is always safe.  */
1680     {
1681       bool ok = false;
1682 
1683       FOR_EACH_EDGE (e, ei, bb->preds)
1684 	{
1685 	  if (e->src == loop->header)
1686 	    {
1687 	      ok = true;
1688 	      break;
1689 	    }
1690 	}
1691 
1692       if (!ok)
1693 	return DOMST_NONDOMINATING;
1694     }
1695 
1696   if (bb == loop->latch)
1697     return DOMST_DOMINATING;
1698 
1699   /* Check that BB dominates LOOP->latch, and that it is back-reachable
1700      from it.  */
1701 
1702   bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1703   dbds_ce_stop = loop->header;
1704   nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1705 				bblocks, loop->num_nodes, bb);
1706   for (i = 0; i < nblocks; i++)
1707     FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1708       {
1709 	if (e->src == loop->header)
1710 	  {
1711 	    free (bblocks);
1712 	    return DOMST_NONDOMINATING;
1713 	  }
1714 	if (e->src == bb)
1715 	  bb_reachable = true;
1716       }
1717 
1718   free (bblocks);
1719   return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1720 }
1721 
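/* To illustrate the three results on a hypothetical loop with header H
   and latch L:

     - With paths H -> A -> L and H -> B -> L, block A does not dominate
       the latch (the path through B avoids it), so the result for A is
       DOMST_NONDOMINATING.

     - With H -> A -> L as the only path, every walk from the header back
       to the latch passes through A and the latch is reachable from A,
       so the result for A is DOMST_DOMINATING.

     - DOMST_LOOP_BROKEN means BB is not bypassed, but the latch is no
       longer reachable from it, i.e. threading will cause the loop to
       cease to exist.  */
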
1722 /* Thread jumps through the header of LOOP.  Returns true if the CFG changes.
1723    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1724    to the inside of the loop.  */
1725 
1726 static bool
1727 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1728 {
1729   basic_block header = loop->header;
1730   edge e, tgt_edge, latch = loop_latch_edge (loop);
1731   edge_iterator ei;
1732   basic_block tgt_bb, atgt_bb;
1733   enum bb_dom_status domst;
1734 
1735   /* We have already threaded through headers to exits, so all the threading
1736      requests now are to the inside of the loop.  We need to avoid creating
1737      irreducible regions (i.e., loops with more than one entry block), and
1738      also loops with several latch edges, or new subloops of the loop (although
1739      there are cases where it might be appropriate, it is difficult to decide,
1740      and doing it wrongly may confuse other optimizers).
1741 
1742      We could handle more general cases here.  However, the intention is to
1743      preserve some information about the loop, which is impossible if its
1744      structure changes significantly, in a way that is not well understood.
1745      Thus we only handle a few important special cases, in which updating
1746      the loop-carried information should also be feasible:
1747 
1748      1) Propagation of the latch edge to a block that dominates the latch
1749 	block of the loop.  This aims to handle the following idiom:
1750 
1751 	first = 1;
1752 	while (1)
1753 	  {
1754 	    if (first)
1755 	      initialize;
1756 	    first = 0;
1757 	    body;
1758 	  }
1759 
1760 	After threading the latch edge, this becomes
1761 
1762 	first = 1;
1763 	if (first)
1764 	  initialize;
1765 	while (1)
1766 	  {
1767 	    first = 0;
1768 	    body;
1769 	  }
1770 
1771 	The original header of the loop is moved out of it, and we may thread
1772 	the remaining edges through it without further constraints.
1773 
1774      2) All entry edges are propagated to a single basic block that dominates
1775 	the latch block of the loop.  This aims to handle the following idiom
1776 	(normally created for "for" loops):
1777 
1778 	i = 0;
1779 	while (1)
1780 	  {
1781 	    if (i >= 100)
1782 	      break;
1783 	    body;
1784 	    i++;
1785 	  }
1786 
1787 	This becomes
1788 
1789 	i = 0;
1790 	while (1)
1791 	  {
1792 	    body;
1793 	    i++;
1794 	    if (i >= 100)
1795 	      break;
1796 	  }
1797      */
1798 
1799   /* Threading through the header won't improve the code if the header has just
1800      one successor.  */
1801   if (single_succ_p (header))
1802     goto fail;
1803 
1804   if (!may_peel_loop_headers && !redirection_block_p (loop->header))
1805     goto fail;
1806   else
1807     {
1808       tgt_bb = NULL;
1809       tgt_edge = NULL;
1810       FOR_EACH_EDGE (e, ei, header->preds)
1811 	{
1812 	  if (!e->aux)
1813 	    {
1814 	      if (e == latch)
1815 		continue;
1816 
1817 	      /* If the latch is not threaded and there is a header
1818 		 edge that is not threaded, we would create a loop
1819 		 with multiple entries.  */
1820 	      goto fail;
1821 	    }
1822 
1823 	  vec<jump_thread_edge *> *path = THREAD_PATH (e);
1824 
1825 	  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1826 	    goto fail;
1827 	  tgt_edge = (*path)[1]->e;
1828 	  atgt_bb = tgt_edge->dest;
1829 	  if (!tgt_bb)
1830 	    tgt_bb = atgt_bb;
1831 	  /* Two targets of threading would make us create a loop
1832 	     with multiple entries.  */
1833 	  else if (tgt_bb != atgt_bb)
1834 	    goto fail;
1835 	}
1836 
1837       if (!tgt_bb)
1838 	{
1839 	  /* There are no threading requests.  */
1840 	  return false;
1841 	}
1842 
1843       /* Redirecting to empty loop latch is useless.  */
1844       if (tgt_bb == loop->latch
1845 	  && empty_block_p (loop->latch))
1846 	goto fail;
1847     }
1848 
1849   /* The target block must dominate the loop latch, otherwise we would be
1850      creating a subloop.  */
1851   domst = determine_bb_domination_status (loop, tgt_bb);
1852   if (domst == DOMST_NONDOMINATING)
1853     goto fail;
1854   if (domst == DOMST_LOOP_BROKEN)
1855     {
1856       /* If the loop ceased to exist, mark it as such, and thread through its
1857 	 original header.  */
1858       mark_loop_for_removal (loop);
1859       return thread_block (header, false);
1860     }
1861 
1862   if (tgt_bb->loop_father->header == tgt_bb)
1863     {
1864       /* If the target of the threading is a header of a subloop, we need
1865 	 to create a preheader for it, so that the headers of the two loops
1866 	 do not merge.  */
1867       if (EDGE_COUNT (tgt_bb->preds) > 2)
1868 	{
1869 	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1870 	  gcc_assert (tgt_bb != NULL);
1871 	}
1872       else
1873 	tgt_bb = split_edge (tgt_edge);
1874     }
1875 
1876   basic_block new_preheader;
1877 
1878   /* Now consider the case where entry edges are redirected to the new entry
1879      block.  Remember one entry edge, so that we can find the new
1880      preheader (its destination after threading).  */
1881   FOR_EACH_EDGE (e, ei, header->preds)
1882     {
1883       if (e->aux)
1884 	break;
1885     }
1886 
1887   /* The duplicate of the header is the new preheader of the loop.  Ensure
1888      that it is placed correctly in the loop hierarchy.  */
1889   set_loop_copy (loop, loop_outer (loop));
1890 
1891   thread_block (header, false);
1892   set_loop_copy (loop, NULL);
1893   new_preheader = e->dest;
1894 
1895   /* Create the new latch block.  This is always necessary, as the latch
1896      must have only a single successor, but the original header had at
1897      least two successors.  */
1898   loop->latch = NULL;
1899   mfb_kj_edge = single_succ_edge (new_preheader);
1900   loop->header = mfb_kj_edge->dest;
1901   latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1902   loop->header = latch->dest;
1903   loop->latch = latch->src;
1904   return true;
1905 
1906 fail:
1907   /* We failed to thread anything.  Cancel the requests.  */
1908   FOR_EACH_EDGE (e, ei, header->preds)
1909     {
1910       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1911 
1912       if (path)
1913 	{
1914 	  delete_jump_thread_path (path);
1915 	  e->aux = NULL;
1916 	}
1917     }
1918   return false;
1919 }
1920 
1921 /* E1 and E2 are edges into the same basic block.  Return TRUE if the
1922    PHI arguments associated with those edges are equal or there are no
1923    PHI arguments, otherwise return FALSE.  */
1924 
1925 static bool
1926 phi_args_equal_on_edges (edge e1, edge e2)
1927 {
1928   gphi_iterator gsi;
1929   int indx1 = e1->dest_idx;
1930   int indx2 = e2->dest_idx;
1931 
1932   for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1933     {
1934       gphi *phi = gsi.phi ();
1935 
1936       if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1937 			    gimple_phi_arg_def (phi, indx2), 0))
1938 	return false;
1939     }
1940   return true;
1941 }
1942 
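/* For instance (a hypothetical dump), if E1->dest contains
   x_3 = PHI <5 (bb_j), 5 (bb_k)> where bb_j and bb_k are the sources of
   E1 and E2, the arguments for the two edges compare equal; with
   x_3 = PHI <5 (bb_j), 7 (bb_k)> this function returns false.  */
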
1943 /* Walk through the registered jump threads and convert them into a
1944    form convenient for this pass.
1945 
1946    Any block which has incoming edges threaded to outgoing edges
1947    will have its entry in THREADED_BLOCKS set.
1948 
1949    Any threaded edge will have its new outgoing edge stored in the
1950    original edge's AUX field.
1951 
1952    This form avoids the need to walk all the edges in the CFG to
1953    discover blocks which need processing and avoids unnecessary
1954    hash table lookups to map from threaded edge to new target.  */
1955 
1956 static void
1957 mark_threaded_blocks (bitmap threaded_blocks)
1958 {
1959   unsigned int i;
1960   bitmap_iterator bi;
1961   bitmap tmp = BITMAP_ALLOC (NULL);
1962   basic_block bb;
1963   edge e;
1964   edge_iterator ei;
1965 
1966   /* It is possible to have jump threads in which one is a subpath
1967      of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
1968      block, and (B, C), (C, D) where no joiner block exists.
1969 
1970      When this occurs, ignore the jump thread request with the joiner
1971      block.  It is totally subsumed by the simpler jump thread request.
1972 
1973      This results in less block copying and simpler CFGs.  More
1974      importantly, duplicating the joiner block B in this case would
1975      create a new threading opportunity that we could not optimize
1976      until the next jump threading iteration.
1977 
1978      So first convert the jump thread requests which do not require a
1979      joiner block.  */
1980   for (i = 0; i < paths.length (); i++)
1981     {
1982       vec<jump_thread_edge *> *path = paths[i];
1983 
1984       if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1985 	{
1986 	  edge e = (*path)[0]->e;
1987 	  e->aux = (void *)path;
1988 	  bitmap_set_bit (tmp, e->dest->index);
1989 	}
1990     }
1991 
1992   /* Now iterate again, converting cases where we want to thread
1993      through a joiner block, but only if no other edge on the path
1994      already has a jump thread attached to it.  We do this in two passes,
1995      to avoid situations where the order in the paths vec can hide overlapping
1996      threads (the path is recorded on the incoming edge, so we would miss
1997      cases where the second path starts at a downstream edge on the same
1998      path).  First record all joiner paths, deleting any in the unexpected
1999      case where there is already a path for that incoming edge.  */
2000   for (i = 0; i < paths.length ();)
2001     {
2002       vec<jump_thread_edge *> *path = paths[i];
2003 
2004       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
2005 	{
2006 	  /* Attach the path to the starting edge if none is yet recorded.  */
2007 	  if ((*path)[0]->e->aux == NULL)
2008 	    {
2009 	      (*path)[0]->e->aux = path;
2010 	      i++;
2011 	    }
2012 	  else
2013 	    {
2014 	      paths.unordered_remove (i);
2015 	      if (dump_file && (dump_flags & TDF_DETAILS))
2016 		dump_jump_thread_path (dump_file, *path, false);
2017 	      delete_jump_thread_path (path);
2018 	    }
2019 	}
2020       else
2021 	{
2022 	  i++;
2023 	}
2024     }
2025 
2026   /* Second, look for paths that have any other jump thread attached to
2027      them, and either finish converting them or cancel them.  */
2028   for (i = 0; i < paths.length ();)
2029     {
2030       vec<jump_thread_edge *> *path = paths[i];
2031       edge e = (*path)[0]->e;
2032 
2033       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
2034 	{
2035 	  unsigned int j;
2036 	  for (j = 1; j < path->length (); j++)
2037 	    if ((*path)[j]->e->aux != NULL)
2038 	      break;
2039 
2040 	  /* If we iterated through the entire path without exiting the loop,
2041 	     then we are good to go; record it.  */
2042 	  if (j == path->length ())
2043 	    {
2044 	      bitmap_set_bit (tmp, e->dest->index);
2045 	      i++;
2046 	    }
2047 	  else
2048 	    {
2049 	      e->aux = NULL;
2050 	      paths.unordered_remove (i);
2051 	      if (dump_file && (dump_flags & TDF_DETAILS))
2052 		dump_jump_thread_path (dump_file, *path, false);
2053 	      delete_jump_thread_path (path);
2054 	    }
2055 	}
2056       else
2057 	{
2058 	  i++;
2059 	}
2060     }
2061 
2062   /* If optimizing for size, only thread through a block if we don't have
2063      to duplicate it or it is an otherwise empty redirection block.  */
2064   if (optimize_function_for_size_p (cfun))
2065     {
2066       EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2067 	{
2068 	  bb = BASIC_BLOCK_FOR_FN (cfun, i);
2069 	  if (EDGE_COUNT (bb->preds) > 1
2070 	      && !redirection_block_p (bb))
2071 	    {
2072 	      FOR_EACH_EDGE (e, ei, bb->preds)
2073 		{
2074 		  if (e->aux)
2075 		    {
2076 		      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2077 		      delete_jump_thread_path (path);
2078 		      e->aux = NULL;
2079 		    }
2080 		}
2081 	    }
2082 	  else
2083 	    bitmap_set_bit (threaded_blocks, i);
2084 	}
2085     }
2086   else
2087     bitmap_copy (threaded_blocks, tmp);
2088 
2089   /* If we have a joiner block (J) which has two successors S1 and S2 and
2090      we are threading through S1 and the final destination of the thread
2091      is S2, then we must verify that any PHI nodes in S2 have the same
2092      PHI arguments for the edge J->S2 and J->S1->...->S2.
2093 
2094      We used to detect this prior to registering the jump thread, but
2095      that prohibits propagation of edge equivalences into non-dominated
2096      PHI nodes as the equivalency test might occur before propagation.
2097 
2098      This must also occur after we truncate any jump threading paths
2099      as this scenario may only show up after truncation.
2100 
2101      This works for now, but will need improvement as part of the FSM
2102      optimization.
2103 
2104      Note since we've moved the thread request data to the edges,
2105      we have to iterate on those rather than the threaded_edges vector.  */
2106   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2107     {
2108       bb = BASIC_BLOCK_FOR_FN (cfun, i);
2109       FOR_EACH_EDGE (e, ei, bb->preds)
2110 	{
2111 	  if (e->aux)
2112 	    {
2113 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2114 	      bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2115 
2116 	      if (have_joiner)
2117 		{
2118 		  basic_block joiner = e->dest;
2119 		  edge final_edge = path->last ()->e;
2120 		  basic_block final_dest = final_edge->dest;
2121 		  edge e2 = find_edge (joiner, final_dest);
2122 
2123 		  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2124 		    {
2125 		      delete_jump_thread_path (path);
2126 		      e->aux = NULL;
2127 		    }
2128 		}
2129 	    }
2130 	}
2131     }
2132 
2133   /* Look for jump threading paths which cross multiple loop headers.
2134 
2135      The code to thread through loop headers will change the CFG in ways
2136      that invalidate the cached loop iteration information.  So we must
2137      detect that case and wipe the cached information.  */
2138   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2139     {
2140       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2141       FOR_EACH_EDGE (e, ei, bb->preds)
2142 	{
2143 	  if (e->aux)
2144 	    {
2145 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2146 
2147 	      for (unsigned int i = 0, crossed_headers = 0;
2148 		   i < path->length ();
2149 		   i++)
2150 		{
2151 		  basic_block dest = (*path)[i]->e->dest;
2152 		  basic_block src = (*path)[i]->e->src;
2153 		  /* If we enter a loop.  */
2154 		  if (flow_loop_nested_p (src->loop_father, dest->loop_father))
2155 		    ++crossed_headers;
2156 		  /* If we step from a block outside an irreducible region
2157 		     to a block inside an irreducible region, then we have
2158 		     crossed into a loop.  */
2159 		  else if (! (src->flags & BB_IRREDUCIBLE_LOOP)
2160 			   && (dest->flags & BB_IRREDUCIBLE_LOOP))
2161 		    ++crossed_headers;
2162 		  if (crossed_headers > 1)
2163 		    {
2164 		      vect_free_loop_info_assumptions
2165 			((*path)[path->length () - 1]->e->dest->loop_father);
2166 		      break;
2167 		    }
2168 		}
2169 	    }
2170 	}
2171     }
2172 
2173   BITMAP_FREE (tmp);
2174 }
2175 
2176 
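/* A note for the traversals above: a pending request lives entirely in
   E->aux, and THREAD_PATH (defined earlier in this file) merely casts
   it back:

     vec<jump_thread_edge *> *path = THREAD_PATH (e);

   which is why cancelling a request is always the pair of statements
   delete_jump_thread_path (path); e->aux = NULL;.  */
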
2177 /* Verify that the REGION is a valid jump thread.  A jump thread is a special
2178    case of a SEME (Single Entry Multiple Exits) region in which all nodes in
2179    the REGION have exactly one incoming edge.  The only exception is the first
2180    block, which may not have been connected to the rest of the cfg yet.  */
2181 
2182 DEBUG_FUNCTION void
2183 verify_jump_thread (basic_block *region, unsigned n_region)
2184 {
2185   for (unsigned i = 0; i < n_region; i++)
2186     gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2187 }
2188 
2189 /* Return true when BB is one of the first N items in BBS.  */
2190 
2191 static inline bool
2192 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2193 {
2194   for (int i = 0; i < n; i++)
2195     if (bb == bbs[i])
2196       return true;
2197 
2198   return false;
2199 }
2200 
2201 /* Duplicates a jump-thread path of N_REGION basic blocks.
2202    The ENTRY edge is redirected to the duplicate of the region.
2203 
2204    Remove the last conditional statement in the last basic block in the REGION,
2205    and create a single fallthru edge pointing to the same destination as the
2206    EXIT edge.
2207 
2208    The new basic blocks are stored to REGION_COPY in the same order as they had
2209    in REGION, provided that REGION_COPY is not NULL.
2210 
2211    Returns false if it is unable to copy the region, true otherwise.  */
2212 
2213 static bool
2214 duplicate_thread_path (edge entry, edge exit,
2215 		       basic_block *region, unsigned n_region,
2216 		       basic_block *region_copy)
2217 {
2218   unsigned i;
2219   bool free_region_copy = false;
2220   struct loop *loop = entry->dest->loop_father;
2221   edge exit_copy;
2222   edge redirected;
2223   int curr_freq;
2224   gcov_type curr_count;
2225 
2226   if (!can_copy_bbs_p (region, n_region))
2227     return false;
2228 
2229   /* Some sanity checking.  Note that we do not check for all possible
2230      misuses of this function.  I.e., if you ask to copy something weird,
2231      it will work, but the state of the structures probably will not be
2232      correct.  */
2233   for (i = 0; i < n_region; i++)
2234     {
2235       /* We do not handle subloops, i.e. all the blocks must belong to the
2236 	 same loop.  */
2237       if (region[i]->loop_father != loop)
2238 	return false;
2239     }
2240 
2241   initialize_original_copy_tables ();
2242 
2243   set_loop_copy (loop, loop);
2244 
2245   if (!region_copy)
2246     {
2247       region_copy = XNEWVEC (basic_block, n_region);
2248       free_region_copy = true;
2249     }
2250 
2251   copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2252 	    split_edge_bb_loc (entry), false);
2253 
2254   /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
2255      following code ensures that all the edges exiting the jump-thread path are
2256      redirected back to the original code: these edges are exceptions
2257      invalidating the property that is propagated by executing all the blocks of
2258      the jump-thread path in order.  */
2259 
2260   curr_count = entry->count;
2261   curr_freq = EDGE_FREQUENCY (entry);
2262 
2263   for (i = 0; i < n_region; i++)
2264     {
2265       edge e;
2266       edge_iterator ei;
2267       basic_block bb = region_copy[i];
2268 
2269       /* Watch for inconsistent profile information.  */
2270       if (curr_count > region[i]->count)
2271 	curr_count = region[i]->count;
2272       if (curr_freq > region[i]->frequency)
2273 	curr_freq = region[i]->frequency;
2274       /* Scale current BB.  */
2275       if (region[i]->count)
2276 	{
2277 	  /* In the middle of the path we only scale the frequencies.
2278 	     In last BB we need to update probabilities of outgoing edges
2279 	     because we know which one is taken at the threaded path.  */
2280 	  if (i + 1 != n_region)
2281 	    scale_bbs_frequencies_gcov_type (region + i, 1,
2282 					     region[i]->count - curr_count,
2283 					     region[i]->count);
2284 	  else
2285 	    update_bb_profile_for_threading (region[i],
2286 					     curr_freq, curr_count,
2287 					     exit);
2288 	  scale_bbs_frequencies_gcov_type (region_copy + i, 1, curr_count,
2289 					   region_copy[i]->count);
2290 	}
2291       else if (region[i]->frequency)
2292 	{
2293 	  if (i + 1 != n_region)
2294 	    scale_bbs_frequencies_int (region + i, 1,
2295 				       region[i]->frequency - curr_freq,
2296 				       region[i]->frequency);
2297 	  else
2298 	    update_bb_profile_for_threading (region[i],
2299 					     curr_freq, curr_count,
2300 					     exit);
2301 	  scale_bbs_frequencies_int (region_copy + i, 1, curr_freq,
2302 				     region_copy[i]->frequency);
2303 	}
2304 
2305       if (single_succ_p (bb))
2306 	{
2307 	  /* Make sure the successor is the next node in the path.  */
2308 	  gcc_assert (i + 1 == n_region
2309 		      || region_copy[i + 1] == single_succ_edge (bb)->dest);
2310 	  if (i + 1 != n_region)
2311 	    {
2312 	      curr_freq = EDGE_FREQUENCY (single_succ_edge (bb));
2313 	      curr_count = single_succ_edge (bb)->count;
2314 	    }
2315 	  continue;
2316 	}
2317 
2318       /* Special case the last block on the path: make sure that it does not
2319 	 jump back on the copied path, including back to itself.  */
2320       if (i + 1 == n_region)
2321 	{
2322 	  FOR_EACH_EDGE (e, ei, bb->succs)
2323 	    if (bb_in_bbs (e->dest, region_copy, n_region))
2324 	      {
2325 		basic_block orig = get_bb_original (e->dest);
2326 		if (orig)
2327 		  redirect_edge_and_branch_force (e, orig);
2328 	      }
2329 	  continue;
2330 	}
2331 
2332       /* Redirect all other edges jumping to non-adjacent blocks back to the
2333 	 original code.  */
2334       FOR_EACH_EDGE (e, ei, bb->succs)
2335 	if (region_copy[i + 1] != e->dest)
2336 	  {
2337 	    basic_block orig = get_bb_original (e->dest);
2338 	    if (orig)
2339 	      redirect_edge_and_branch_force (e, orig);
2340 	  }
2341 	else
2342 	  {
2343 	    curr_freq = EDGE_FREQUENCY (e);
2344 	    curr_count = e->count;
2345 	  }
2346     }
2347 
2349   if (flag_checking)
2350     verify_jump_thread (region_copy, n_region);
2351 
2352   /* Remove the last branch in the jump thread path.  */
2353   remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2354 
2355   /* And fixup the flags on the single remaining edge.  */
2356   edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2357   fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2358   fix_e->flags |= EDGE_FALLTHRU;
2359 
2360   edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2361 
2362   if (e)
2363     {
2364       rescan_loop_exit (e, true, false);
2365       e->probability = REG_BR_PROB_BASE;
2366       e->count = region_copy[n_region - 1]->count;
2367     }
2368 
2369   /* Redirect the entry and add the phi node arguments.  */
2370   if (entry->dest == loop->header)
2371     mark_loop_for_removal (loop);
2372   redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2373   gcc_assert (redirected != NULL);
2374   flush_pending_stmts (entry);
2375 
2376   /* Add the other PHI node arguments.  */
2377   add_phi_args_after_copy (region_copy, n_region, NULL);
2378 
2379   if (free_region_copy)
2380     free (region_copy);
2381 
2382   free_original_copy_tables ();
2383   return true;
2384 }
2385 
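/* A worked example of the scaling above (the numbers are hypothetical):
   if region[i] has count 100 and the threaded path carries curr_count 60,
   the original block is scaled down to 100 - 60 = 40 while the copy is
   scaled to 60, so together the two blocks still account for the
   original count of 100.  */
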
2386 /* Return true when PATH is a valid jump-thread path.  */
2387 
2388 static bool
2389 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2390 {
2391   unsigned len = path->length ();
2392 
2393   /* Check that the path is connected.  */
2394   for (unsigned int j = 0; j < len - 1; j++)
2395     {
2396       edge e = (*path)[j]->e;
2397       if (e->dest != (*path)[j+1]->e->src)
2398 	return false;
2399     }
2400   return true;
2401 }
2402 
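/* E.g., the edge sequence A->B, B->C, C->D is connected and therefore
   valid, while A->B, C->D is rejected because B is not the source of
   the next edge on the path.  */
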
2403 /* Remove any queued jump threads that include edge E.
2404 
2405    We don't actually remove them here, just record the edges into a
2406    hash table.  That way we can do the search once per iteration of
2407    DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR.  */
2408 
2409 void
2410 remove_jump_threads_including (edge_def *e)
2411 {
2412   if (!paths.exists ())
2413     return;
2414 
2415   if (!removed_edges)
2416     removed_edges = new hash_table<struct removed_edges> (17);
2417 
2418   edge *slot = removed_edges->find_slot (e, INSERT);
2419   *slot = e;
2420 }
2421 
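/* A sketch of the intended use, assuming a client pass is about to
   remove edge E while thread requests are still queued:

     remove_jump_threads_including (e);
     remove_edge (e);

   Any queued path that references E is then dropped at the start of
   thread_through_all_blocks.  */
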
2422 /* Walk through all blocks and thread incoming edges to the appropriate
2423    outgoing edge for each edge pair recorded in THREADED_EDGES.
2424 
2425    It is the caller's responsibility to fix the dominance information
2426    and rewrite duplicated SSA_NAMEs back into SSA form.
2427 
2428    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2429    loop headers if it does not simplify the loop.
2430 
2431    Returns true if one or more edges were threaded, false otherwise.  */
2432 
2433 bool
2434 thread_through_all_blocks (bool may_peel_loop_headers)
2435 {
2436   bool retval = false;
2437   unsigned int i;
2438   bitmap_iterator bi;
2439   bitmap threaded_blocks;
2440   struct loop *loop;
2441 
2442   if (!paths.exists ())
2443     {
2444       retval = false;
2445       goto out;
2446     }
2447 
2448   threaded_blocks = BITMAP_ALLOC (NULL);
2449   memset (&thread_stats, 0, sizeof (thread_stats));
2450 
2451   /* Remove any paths that referenced removed edges.  */
2452   if (removed_edges)
2453     for (i = 0; i < paths.length (); )
2454       {
2455 	unsigned int j;
2456 	vec<jump_thread_edge *> *path = paths[i];
2457 
2458 	for (j = 0; j < path->length (); j++)
2459 	  {
2460 	    edge e = (*path)[j]->e;
2461 	    if (removed_edges->find_slot (e, NO_INSERT))
2462 	      break;
2463 	  }
2464 
2465 	if (j != path->length ())
2466 	  {
2467 	    delete_jump_thread_path (path);
2468 	    paths.unordered_remove (i);
2469 	    continue;
2470 	  }
2471 	i++;
2472       }
2473 
2474   /* Jump-thread all FSM threads before other jump-threads.  */
2475   for (i = 0; i < paths.length ();)
2476     {
2477       vec<jump_thread_edge *> *path = paths[i];
2478       edge entry = (*path)[0]->e;
2479 
2480       /* Only code-generate FSM jump-threads in this loop.  */
2481       if ((*path)[0]->type != EDGE_FSM_THREAD)
2482 	{
2483 	  i++;
2484 	  continue;
2485 	}
2486 
2487       /* Do not jump-thread twice from the same block.  */
2488       if (bitmap_bit_p (threaded_blocks, entry->src->index)
2489 	  /* We may not want to realize this jump thread path
2490 	     for various reasons.  So check it first.  */
2491 	  || !valid_jump_thread_path (path))
2492 	{
2493 	  /* Remove invalid FSM jump-thread paths.  */
2494 	  delete_jump_thread_path (path);
2495 	  paths.unordered_remove (i);
2496 	  continue;
2497 	}
2498 
2499       unsigned len = path->length ();
2500       edge exit = (*path)[len - 1]->e;
2501       basic_block *region = XNEWVEC (basic_block, len - 1);
2502 
2503       for (unsigned int j = 0; j < len - 1; j++)
2504 	region[j] = (*path)[j]->e->dest;
2505 
2506       if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
2507 	{
2508 	  /* We do not update dominance info.  */
2509 	  free_dominance_info (CDI_DOMINATORS);
2510 	  bitmap_set_bit (threaded_blocks, entry->src->index);
2511 	  retval = true;
2512 	  thread_stats.num_threaded_edges++;
2513 	}
2514 
2515       delete_jump_thread_path (path);
2516       paths.unordered_remove (i);
2517       free (region);
2518     }
2519 
2520   /* Remove from PATHS all the jump-threads starting with an edge already
2521      jump-threaded.  */
2522   for (i = 0; i < paths.length ();)
2523     {
2524       vec<jump_thread_edge *> *path = paths[i];
2525       edge entry = (*path)[0]->e;
2526 
2527       /* Do not jump-thread twice from the same block.  */
2528       if (bitmap_bit_p (threaded_blocks, entry->src->index))
2529 	{
2530 	  delete_jump_thread_path (path);
2531 	  paths.unordered_remove (i);
2532 	}
2533       else
2534 	i++;
2535     }
2536 
2537   bitmap_clear (threaded_blocks);
2538 
2539   mark_threaded_blocks (threaded_blocks);
2540 
2541   initialize_original_copy_tables ();
2542 
2543   /* First perform the threading requests that do not affect
2544      loop structure.  */
2545   EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
2546     {
2547       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2548 
2549       if (EDGE_COUNT (bb->preds) > 0)
2550 	retval |= thread_block (bb, true);
2551     }
2552 
2553   /* Then perform the threading through loop headers.  We start with the
2554      innermost loop, so that the changes in cfg we perform won't affect
2555      further threading.  */
2556   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2557     {
2558       if (!loop->header
2559 	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
2560 	continue;
2561 
2562       retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2563     }
2564 
2565   /* All jump threading paths should have been resolved at this
2566      point.  Verify that is the case.  */
2567   basic_block bb;
2568   FOR_EACH_BB_FN (bb, cfun)
2569     {
2570       edge_iterator ei;
2571       edge e;
2572       FOR_EACH_EDGE (e, ei, bb->preds)
2573 	gcc_assert (e->aux == NULL);
2574     }
2575 
2576   statistics_counter_event (cfun, "Jumps threaded",
2577 			    thread_stats.num_threaded_edges);
2578 
2579   free_original_copy_tables ();
2580 
2581   BITMAP_FREE (threaded_blocks);
2582   threaded_blocks = NULL;
2583   paths.release ();
2584 
2585   if (retval)
2586     loops_state_set (LOOPS_NEED_FIXUP);
2587 
2588  out:
2589   delete removed_edges;
2590   removed_edges = NULL;
2591   return retval;
2592 }
2593 
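/* A sketch of a driver sequence, assuming a client pass has already
   registered its paths; the TODO flag shown is one way to meet the
   SSA-update obligation described above, and the details vary by pass:

     if (thread_through_all_blocks (false))
       update_ssa (TODO_update_ssa);  */
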
2594 /* Delete the jump threading path PATH.  We have to explicitly delete
2595    each entry in the vector, then the container.  */
2596 
2597 void
2598 delete_jump_thread_path (vec<jump_thread_edge *> *path)
2599 {
2600   for (unsigned int i = 0; i < path->length (); i++)
2601     delete (*path)[i];
2602   path->release ();
2603   delete path;
2604 }
2605 
2606 /* Register a jump threading opportunity.  We queue up all the jump
2607    threading opportunities discovered by a pass and update the CFG
2608    and SSA form all at once.
2609 
2610    PATH describes the edges to thread: threading its first edge E to the
2611    destination selected by its last edge E2 effectively records that
2612    E->dest can be changed to E2->dest after fixing the SSA graph.  */
2613 
2614 void
2615 register_jump_thread (vec<jump_thread_edge *> *path)
2616 {
2617   if (!dbg_cnt (registered_jump_thread))
2618     {
2619       delete_jump_thread_path (path);
2620       return;
2621     }
2622 
2623   /* First make sure there are no NULL outgoing edges on the jump threading
2624      path.  That can happen when jumping to a constant address.  */
2625   for (unsigned int i = 0; i < path->length (); i++)
2626     {
2627       if ((*path)[i]->e == NULL)
2628 	{
2629 	  if (dump_file && (dump_flags & TDF_DETAILS))
2630 	    {
2631 	      fprintf (dump_file,
2632 		       "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
2633 	      dump_jump_thread_path (dump_file, *path, false);
2634 	    }
2635 
2636 	  delete_jump_thread_path (path);
2637 	  return;
2638 	}
2639 
2640       /* Only the FSM threader is allowed to thread across
2641 	 backedges in the CFG.  */
2642       if (flag_checking
2643 	  && (*path)[0]->type != EDGE_FSM_THREAD)
2644 	gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
2645     }
2646 
2647   if (dump_file && (dump_flags & TDF_DETAILS))
2648     dump_jump_thread_path (dump_file, *path, true);
2649 
2650   if (!paths.exists ())
2651     paths.create (5);
2652 
2653   paths.safe_push (path);
2654 }
2655
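/* A minimal sketch, assuming E0 is an incoming edge a client pass has
   proven threadable and E1 leaves a block whose side effects must be
   duplicated, of how a client builds and queues a path (the function
   name is hypothetical; real clients live in tree-ssa-threadedge.c):  */

static void
example_queue_jump_thread (edge e0, edge e1)
{
  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

  /* The first edge on a path is always the incoming edge.  */
  path->safe_push (new jump_thread_edge (e0, EDGE_START_JUMP_THREAD));

  /* Copy E1's source block so its side effects are preserved.  */
  path->safe_push (new jump_thread_edge (e1, EDGE_COPY_SRC_BLOCK));

  /* Queue the path; thread_through_all_blocks later realizes it.  */
  register_jump_thread (path);
}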