/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
#include "cfgloop.h"
#include "dbgcnt.h"
#include "tree-cfg.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */
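
/* As a concrete sketch of the transformation (hypothetical block names,
   not from any particular testcase): suppose A->B and D->B are B's only
   incoming edges, B ends in "if (x_1 > 0)" with outgoing edges B->C and
   B->E, and jump threading proved that x_1 > 0 holds whenever control
   arrives via A->B.  Steps 1-5 produce a duplicate B' containing B's
   side effects with the single outgoing edge B'->C, then redirect A->B
   to A->B', leaving the original B in place to handle D->B.  PHIs in C
   gain an argument for the edge B'->C (step 3) and PHIs that were in B
   are recreated in B' (step 4).  */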


/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};
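
/* For example (an illustrative sketch): if incoming edges A->B and D->B
   are both registered to thread along the same path ending at block C,
   they share a single redirection_data entry whose PATH ends at C and
   whose INCOMING_EDGES list holds both edges.  dup_blocks[0] holds the
   duplicate of the first copied block on the path; dup_blocks[1] is only
   used when the path requires a second copied block, e.g. a normal copy
   after a joiner.  */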

/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}

/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}

/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}
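
/* For instance, the two example paths discussed later in this file,
   A -> J -> Son -> D and C -> J -> Son -> E, hash by the destination
   block of their final edge (D and E respectively).  Two distinct paths
   that happen to end at the same block collide in the table and are
   then distinguished by the edge-by-edge comparison in equal above.  */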

/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;

/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;

  /* When we have multiple paths through a joiner which reach different
     final destinations, then we may need to correct for potential
     profile insanities.  */
  bool need_profile_correction;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
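
/* A typical usage pattern, as seen throughout this file:

     vec<jump_thread_edge *> *path = THREAD_PATH (e);
     edge elast = path->last ()->e;

   where E is an incoming edge whose AUX field carries a registered
   jump threading path.  */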

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	{
	  free_dom_edge_info (e);
	  remove_edge (e);
	}
      else
	ei_next (&ei);
    }

  /* If the remaining edge is a loop exit, there must have
     been a removed edge that was not a loop exit.

     In that case BB and possibly other blocks were previously
     in the loop, but are now outside the loop.  Thus, we need
     to update the loop structures.  */
  if (single_succ_p (bb)
      && loop_outer (bb->loop_father)
      && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
    loops_state_set (LOOPS_NEED_FIXUP);
}

/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->frequency = 0;
  rd->dup_blocks[count]->count = 0;
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;

/* Given an incoming edge E whose AUX field carries a jump threading path,
   lookup and return the hash table entry describing that path's final
   outgoing edge.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present, and E is added to the list of incoming
   edges associated with the entry.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add E
	 to the list of incoming edges associated with this path.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}

/* Similar to copy_phi_args, except that the PHI arg already exists;
   it just does not have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in SRC_E->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
			   gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      source_location locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}

/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has a constant value in a flow-sensitive manner.  Set
   LOCUS to the location of the constant PHI arg and return the value.
   Return DEF directly if either PATH or IDX is zero.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, source_location *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
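
/* A small hypothetical example: if DEF is defined by

     x_1 = PHI <7(A), y_2(B)>

   in DEF_BB, and the threading path enters DEF_BB through the edge from
   A at some index before IDX, then 7 is returned and *LOCUS is set to
   that PHI argument's location, allowing the constant to be propagated
   in place of x_1.  */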

/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has a constant value; if so, copy the constant value instead of the
   argument itself.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      source_location locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}

/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has a constant
   value in a flow-sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the node in the jump threading path at
   which we start checking whether the new PHI argument has a
   constant value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */
  e->aux = NULL;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}

/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }
  return false;
}


/* Compute the amount of profile count/frequency coming into the jump threading
   path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
   PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if path contains
   a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts/frequency
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go offpath at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go offpath at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

		A     B     C
		 \    |    /
	       Ea \   |Eb / Ec
		   \  |  /
		    v v v
		      J       <-- Joiner
		     / \
		Eoff/   \Eon
		   /     \
		  v       v
		Soff     Son  <--- Normal
			 /\
		      Ed/  \ Ee
		       /    \
		      v     v
		      D      E

	    Jump threading paths: A -> J -> Son -> D (path 1)
				  C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
   Ea could represent multiple incoming blocks/edges that are included in
   path 1.
   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
   before or after the "normal" copy block).  These are not duplicated onto
   the jump threading path, as they are single-successor.
   - Any of the blocks along the path may have other incoming edges that
   are not part of any jump threading path, but add profile counts along
   the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

		A	   B	       C
		|	   |	       |
	      Ea|	   |Eb	       |Ec
		|	   |	       |
		v	   v	       v
	       Ja	   J	      Jc
	       / \	  / \Eon'     / \
	  Eona/   \   ---/---\--------   \Eonc
	     /     \ /  /     \		  \
	    v       v  v       v	  v
	   Sona     Soff      Son	Sonc
	     \		       /\	  /
	      \___________    /  \  _____/
			  \  /    \/
			   vv      v
			    D      E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remains Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping
   track of all counts coming into the original path that are not in
   a jump thread (Eb in the above example, but as noted earlier, there
   could be other predecessors incoming to the path at various points,
   such as at Son).  Call this cumulative non-path count coming into the
   path before D as Enonpath.  We then ensure that the count from Sona->D
   is at least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
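
/* A worked numeric sketch of the correction above, using the diagram's
   edge names with invented counts: let Ea = 40, Eb = 20 and Ec = 40 (so
   J's count is 100), Eon = 80 and Ed = 55, with all of Ea's count
   following path 1.  Processing path 1 gives path_in_count = 40, an
   onpath scale of 40/100, and an initial path_out_count of
   min (40, 80 * 40%) = 32.  The non-path count into the path is
   Eb = 20, so Sona->D must carry at least Ed - Enonpath = 55 - 20 = 35;
   path_out_count is therefore raised to 35, still below the minimum
   count of 40 along the path.  */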

static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     gcov_type *path_in_count_ptr,
		     gcov_type *path_out_count_ptr,
		     int *path_in_freq_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type nonpath_count = 0;
  bool has_joiner = false;
  gcov_type path_in_count = 0;
  int path_in_freq = 0;

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple buckets:
	path_in_count: total count of incoming edges that flow into the
		  current path.
	nonpath_count: total count of incoming edges that are not
		  flowing along *any* path.  These are the counts
		  that will still flow along the original path after
		  all path duplication is done by potentially multiple
		  calls to this routine.
     (any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)
     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count;
	  path_in_freq += EDGE_FREQUENCY (ein);
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any
	     jump-threading path.  These counts will still flow out of
	     the original path after all jump threading is complete.  */
	  nonpath_count += ein->count;
	}
    }

  /* This is needed due to insane incoming frequencies.  */
  if (path_in_freq > BB_FREQ_MAX)
    path_in_freq = BB_FREQ_MAX;

  BITMAP_FREE (in_edge_srcs);

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  gcov_type total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path
     bb that was from the threading paths (computed above in onpath_scale).
     Afterwards, we will need to do some fixup to account for other threading
     paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  gcov_type path_out_count = path_in_count;
  gcov_type min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      gcov_type cur_count = epath->count;
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = apply_probability (cur_count, onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count;
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count < min_path_count)
	min_path_count = epath->count;
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (local_info->need_profile_correction
      && has_joiner && path_out_count < elast->count - nonpath_count)
    {
      path_out_count = elast->count - nonpath_count;
      /* But neither can we go above the minimum count along the path
	 we are duplicating.  This can be an issue due to profile
	 insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  *path_in_freq_ptr = path_in_freq;
  return has_joiner;
}


/* Update the counts and frequencies for both an original path
   edge EPATH and its duplicate EDUP.  The duplicate source block
   will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
   and the duplicate edge EDUP will have a count of PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, gcov_type path_in_count,
		gcov_type path_out_count, int path_in_freq)
{
  /* First update the duplicated block's count / frequency.  */
  if (edup)
    {
      basic_block dup_block = edup->src;
      gcc_assert (dup_block->count == 0);
      gcc_assert (dup_block->frequency == 0);
      dup_block->count = path_in_count;
      dup_block->frequency = path_in_freq;
    }

  /* Now update the original block's count and frequency in the
     opposite manner - remove the counts/freq that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;
  if (epath->src->count < 0)
    epath->src->count = 0;
  epath->src->frequency -= path_in_freq;
  if (epath->src->frequency < 0)
    epath->src->frequency = 0;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */
  if (edup)
    edup->count = path_out_count;
  epath->count -= path_out_count;
  gcc_assert (epath->count >= 0);
}
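
/* For example (invented numbers): with path_in_count = 40,
   path_in_freq = 50 and path_out_count = 35, the duplicate source block
   receives count 40 and frequency 50, the same amounts are subtracted
   from the original source block (clamped at zero), EDUP's count becomes
   35, and 35 is subtracted from EPATH's count.  */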


/* The duplicate and original joiner blocks may end up with different
   probabilities (different from both the original and from each other).
   Recompute the probabilities here once we have updated the edge
   counts and frequencies.  */

static void
recompute_probabilities (basic_block bb)
{
  edge esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (esucc, ei, bb->succs)
    {
      if (!bb->count)
	continue;

      /* Prevent overflow computation due to insane profiles.  */
      if (esucc->count < bb->count)
	esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
						 bb->count);
      else
	/* Can happen with missing/guessed probabilities, since we
	   may determine that more is flowing along duplicated
	   path than joiner succ probabilities allowed.
	   Counts and freqs will be insane after jump threading,
	   at least make sure probability is sane or we will
	   get a flow verification error.
	   Not much we can do to make counts/freqs sane without
	   redoing the profile estimation.  */
	esucc->probability = REG_BR_PROB_BASE;
    }
}
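
/* GCOV_COMPUTE_SCALE expresses a ratio in units of REG_BR_PROB_BASE
   (10000).  For instance, an outgoing edge carrying count 30 out of a
   block with count 120 gets probability 30 * 10000 / 120 = 2500,
   i.e. 25%.  */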


/* Update the counts of the original and duplicated edges from a joiner
   that go off path, given that we have already determined that the
   duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
   outgoing count along the path PATH_OUT_COUNT.  The original (on-)path
   edge from joiner is EPATH.  */

static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
			      gcov_type path_in_count,
			      gcov_type path_out_count)
{
  /* Compute the count that currently flows off path from the joiner.
     In other words, the total count of joiner's out edges other than
     epath.  Compute this by walking the successors instead of
     subtracting epath's count from the joiner bb count, since there
     are sometimes slight insanities where the total out edge count is
     larger than the bb count (possibly due to rounding/truncation
     errors).  */
  gcov_type total_orig_off_path_count = 0;
  edge enonpath;
  edge_iterator ei;
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      if (enonpath == epath)
	continue;
      total_orig_off_path_count += enonpath->count;
    }

  /* For the path that we are duplicating, the amount that will flow
     off path from the duplicated joiner is the delta between the
     path's cumulative in count and the portion of that count we
     estimated above as flowing from the joiner along the duplicated
     path.  */
  gcov_type total_dup_off_path_count = path_in_count - path_out_count;

  /* Now do the actual updates of the off-path edges.  */
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      /* Look for edges going off of the threading path.  */
      if (enonpath == epath)
	continue;

      /* Find the corresponding edge out of the duplicated joiner.  */
      edge enonpathdup = find_edge (dup_bb, enonpath->dest);
      gcc_assert (enonpathdup);

      /* We can't use the original probability of the joiner's out
	 edges, since the probabilities of the original branch
	 and the duplicated branches may vary after all threading is
	 complete.  But apportion the duplicated joiner's off-path
	 total edge count computed earlier (total_dup_off_path_count)
	 among the duplicated off-path edges based on their original
	 ratio to the full off-path count (total_orig_off_path_count).  */
      int scale = GCOV_COMPUTE_SCALE (enonpath->count,
				      total_orig_off_path_count);
      /* Give the duplicated offpath edge a portion of the duplicated
	 total.  */
      enonpathdup->count = apply_scale (scale,
					total_dup_off_path_count);
      /* Now update the original offpath edge count, handling underflow
	 due to rounding errors.  */
      enonpath->count -= enonpathdup->count;
      if (enonpath->count < 0)
	enonpath->count = 0;
    }
}
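
/* Continuing with invented numbers: if path_in_count = 40 and
   path_out_count = 35, then total_dup_off_path_count is 5.  Were the
   original joiner to have two off-path edges carrying counts 60 and 20
   (total_orig_off_path_count = 80), their duplicates would receive
   roughly 5 * 60/80 ≈ 4 and 5 * 20/80 ≈ 1 after rounding, and those
   amounts are subtracted from the original edges' counts.  */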


/* Check if the paths through RD all have estimated frequencies but zero
   profile counts.  This is more accurate than checking the entry block
   for a zero profile count, since profile insanities sometimes creep in.  */

static bool
estimated_freqs_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  bool non_zero_freq = false;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      if (ein->count)
	return false;
      non_zero_freq |= ein->src->frequency != 0;
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      if (epath->src->count)
	return false;
      non_zero_freq |= epath->src->frequency != 0;
      edge esucc;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	{
	  if (esucc->count)
	    return false;
	  non_zero_freq |= esucc->src->frequency != 0;
	}
    }
  return non_zero_freq;
}


/* Invoked for routines that have guessed frequencies and no profile
   counts to record the block and edge frequencies for paths through RD
   in the profile count fields of those blocks and edges.  This is because
   ssa_fix_duplicate_block_edges incrementally updates the block and
   edge counts as edges are redirected, and it is difficult to do that
   for edge frequencies which are computed on the fly from the source
   block frequency and probability.  When a block frequency is updated
   its outgoing edge frequencies are affected and become difficult to
   adjust.  */

static void
freqs_to_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the probability when the frequencies are very
	 small.  */
      ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
				      ein->probability);
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      edge esucc;
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the edge probability when the frequencies are very
	 small.  */
      epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = apply_probability (esucc->src->count,
					  esucc->probability);
    }
}
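
/* For instance (hypothetical values): a source block with frequency 100
   and an outgoing edge with probability 5000 (half of REG_BR_PROB_BASE)
   yields a temporary block count of 100 * 10000 = 1000000 and an edge
   count of 500000; scaling up by REG_BR_PROB_BASE keeps the rounding
   error small when the frequencies involved are tiny.  */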


/* For routines that have guessed frequencies and no profile counts, where we
   used freqs_to_counts_path to record block and edge frequencies for paths
   through RD, we clear the counts after completing all updates for RD.
   The updates in ssa_fix_duplicate_block_edges are based off the count fields,
   but the block frequencies and edge probabilities were updated as well,
   so we can simply clear the count fields.  */

static void
clear_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein, esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    ein->count = 0;

  /* First clear counts along original path.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = 0;
      epath->src->count = 0;
    }
  /* Also need to clear the counts along duplicated path.  */
  for (unsigned int i = 0; i < 2; i++)
    {
      basic_block dup = rd->dup_blocks[i];
      if (!dup)
	continue;
      FOR_EACH_EDGE (esucc, ei, dup->succs)
	esucc->count = 0;
      dup->count = 0;
    }
}

/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type path_in_count = 0;
  gcov_type path_out_count = 0;
  int path_in_freq = 0;

  /* This routine updates profile counts, frequencies, and probabilities
     incrementally.  Since it is difficult to do the incremental updates
     using frequencies/probabilities alone, for routines without profile
     data we first take a snapshot of the existing block and edge frequencies
     by copying them into the empty profile count fields.  These counts are
     then used to do the incremental updates, and cleared at the end of this
     routine.  If the function is marked as having a profile, we still check
     to see if the paths through RD are using estimated frequencies because
     the routine had zero profile counts.  */
  bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
			     || estimated_freqs_path (rd));
  if (do_freqs_to_counts)
    freqs_to_counts_path (rd);

  /* First determine how much profile count to move from original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count,
					 &path_in_freq);

  int cur_path_freq = path_in_freq;
  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count and frequency are the totals for all edges
	     incoming to this jump threading path computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count,
			  path_in_freq);

	  /* Next we need to update the counts of the original and duplicated
	     edges from the joiner that go off path.  */
	  update_joiner_offpath_counts (epath, e2->src, path_in_count,
					path_out_count);

	  /* Finally, we need to set the probabilities on the duplicated
	     edges out of the duplicated joiner (e2->src).  The probabilities
	     along the original path will all be updated below after we finish
	     processing the whole path.  */
	  recompute_probabilities (e2->src);

	  /* Record the frequency flowing to the downstream duplicated
	     path blocks.  */
	  cur_path_freq = EDGE_FREQUENCY (e2);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count,
			  cur_path_freq);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts/freqs that were moved
	     to the duplicated path from the counts/freqs flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, NULL, path_out_count, path_out_count,
			  cur_path_freq);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  count++;
	}
    }

  /* Now walk orig blocks and update their probabilities, since the
     counts and freqs should be updated properly by above loop.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      recompute_probabilities (epath->src);
    }

  /* Done with all profile and frequency updates, clear counts if they
     were copied.  */
  if (do_freqs_to_counts)
    clear_counts_path (rd);
1303 
1304 /* Hash table traversal callback routine to create duplicate blocks.  */
1305 
1306 int
1307 ssa_create_duplicates (struct redirection_data **slot,
1308 		       ssa_local_info_t *local_info)
1309 {
1310   struct redirection_data *rd = *slot;
1311 
1312   /* The second duplicated block in a jump threading path is specific
1313      to the path.  So it gets stored in RD rather than in LOCAL_DATA.
1314 
1315      Each time we're called, we have to look through the path and see
1316      if a second block needs to be duplicated.
1317 
1318      Note the search starts with the third edge on the path.  The first
1319      edge is the incoming edge, the second edge always has its source
1320      duplicated.  Thus we start our search with the third edge.  */
1321   vec<jump_thread_edge *> *path = rd->path;
1322   for (unsigned int i = 2; i < path->length (); i++)
1323     {
1324       if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1325 	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1326 	{
1327 	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
1328 				      &local_info->duplicate_blocks);
1329 	  break;
1330 	}
1331     }
1332 
1333   /* Create a template block if we have not done so already.  Otherwise
1334      use the template to create a new block.  */
1335   if (local_info->template_block == NULL)
1336     {
1337       create_block_for_threading ((*path)[1]->e->src, rd, 0,
1338 				  &local_info->duplicate_blocks);
1339       local_info->template_block = rd->dup_blocks[0];
1340 
1341       /* We do not create any outgoing edges for the template.  We will
1342 	 take care of that in a later traversal.  That way we do not
1343 	 create edges that are going to just be deleted.  */
1344     }
1345   else
1346     {
1347       create_block_for_threading (local_info->template_block, rd, 0,
1348 				  &local_info->duplicate_blocks);
1349 
1350       /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
1351 	 block.   */
1352       ssa_fix_duplicate_block_edges (rd, local_info);
1353     }
1354 
1355   /* Keep walking the hash table.  */
1356   return 1;
1357 }
1358 
1359 /* We did not create any outgoing edges for the template block during
1360    block creation.  This hash table traversal callback creates the
1361    outgoing edge for the template block.  */
1362 
1363 inline int
1364 ssa_fixup_template_block (struct redirection_data **slot,
1365 			  ssa_local_info_t *local_info)
1366 {
1367   struct redirection_data *rd = *slot;
1368 
  /* If this is the template block, halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_blocks[0])
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);

	  /* Redirect the incoming edge (possibly to the joiner block) to the
	     appropriate duplicate block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      delete_jump_thread_path (path);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
1438 
1439 /* Return true if this block has no executable statements other than
1440    a simple control flow instruction.  When the number of outgoing edges
1441    is one, this is equivalent to a "forwarder" block.  */
1442 
1443 static bool
1444 redirection_block_p (basic_block bb)
1445 {
1446   gimple_stmt_iterator gsi;
1447 
1448   /* Advance to the first executable statement.  */
1449   gsi = gsi_start_bb (bb);
1450   while (!gsi_end_p (gsi)
1451 	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
1452 	     || is_gimple_debug (gsi_stmt (gsi))
1453 	     || gimple_nop_p (gsi_stmt (gsi))
1454 	     || gimple_clobber_p (gsi_stmt (gsi))))
1455     gsi_next (&gsi);
1456 
1457   /* Check if this is an empty block.  */
1458   if (gsi_end_p (gsi))
1459     return true;
1460 
1461   /* Test that we've reached the terminating control statement.  */
1462   return gsi_stmt (gsi)
1463 	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1464 	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1465 	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
1466 }
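
/* For example (a hand-written sketch, not from an actual dump), a block
   containing only a label, a debug statement and a terminating conditional

	<bb 5>:
	L2:
	# DEBUG x => x_1
	if (x_1 > 10)
	  goto <bb 6>;
	else
	  goto <bb 7>;

   satisfies redirection_block_p: none of the statements skipped above has
   a side effect that would be lost by redirecting an edge around it.  */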
1467 
1468 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1469    is reached via one or more specific incoming edges, we know which
1470    outgoing edge from BB will be traversed.
1471 
1472    We want to redirect those incoming edges to the target of the
1473    appropriate outgoing edge.  Doing so avoids a conditional branch
1474    and may expose new optimization opportunities.  Note that we have
1475    to update dominator tree and SSA graph after such changes.
1476 
1477    The key to keeping the SSA graph update manageable is to duplicate
1478    the side effects occurring in BB so that those side effects still
1479    occur on the paths which bypass BB after redirecting edges.
1480 
1481    We accomplish this by creating duplicates of BB and arranging for
1482    the duplicates to unconditionally pass control to one specific
1483    successor of BB.  We then revector the incoming edges into BB to
1484    the appropriate duplicate of BB.
1485 
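   For instance (a schematic only), if we know the path A1->BB always
   continues to S1:

	A1   A2			A1	 A2
	  \ /			 |	  |
	   BB		==>	 BB'	  BB
	  /  \			 |	 /  \
	 S1   S2		 S1    S1    S2

   where BB' is the duplicate of BB whose control statement was removed
   and whose single successor is S1.
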
1486    If NOLOOP_ONLY is true, we only perform the threading as long as it
1487    does not affect the structure of the loops in a nontrivial way.
1488 
1489    If JOINERS is true, then thread through joiner blocks as well.  */
1490 
1491 static bool
1492 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
1493 {
1494   /* E is an incoming edge into BB that we may or may not want to
1495      redirect to a duplicate of BB.  */
1496   edge e, e2;
1497   edge_iterator ei;
1498   ssa_local_info_t local_info;
1499 
1500   local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1501   local_info.need_profile_correction = false;
1502 
1503   /* To avoid scanning a linear array for the element we need, we instead
1504      use a hash table.  For normal code there should be no noticeable
1505      difference.  However, if we have a block with a large number of
1506      incoming and outgoing edges such linear searches can get expensive.  */
1507   redirection_data
1508     = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1509 
1510   /* Record each unique threaded destination into a hash table for
1511      efficient lookups.  */
1512   edge last = NULL;
1513   FOR_EACH_EDGE (e, ei, bb->preds)
1514     {
1515       if (e->aux == NULL)
1516 	continue;
1517 
1518       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1519 
1520       if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1521 	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1522 	continue;
1523 
1524       e2 = path->last ()->e;
1525       if (!e2 || noloop_only)
1526 	{
1527 	  /* If NOLOOP_ONLY is true, we only allow threading through the
1528 	     header of a loop to exit edges.  */
1529 
1530 	  /* One case occurs when there was a loop header buried in a jump
1531 	     threading path that crosses loop boundaries.  We do not try
1532 	     to thread this elsewhere, so just cancel the jump threading
1533 	     request by clearing the AUX field now.  */
1534 	  if ((bb->loop_father != e2->src->loop_father
1535 	       && !loop_exit_edge_p (e2->src->loop_father, e2))
1536 	      || (e2->src->loop_father != e2->dest->loop_father
1537 		  && !loop_exit_edge_p (e2->src->loop_father, e2)))
1538 	    {
1539 	      /* Since this case is not handled by our special code
1540 		 to thread through a loop header, we must explicitly
1541 		 cancel the threading request here.  */
1542 	      delete_jump_thread_path (path);
1543 	      e->aux = NULL;
1544 	      continue;
1545 	    }
1546 
1547 	  /* Another case occurs when trying to thread through our
1548 	     own loop header, possibly from inside the loop.  We will
1549 	     thread these later.  */
1550 	  unsigned int i;
1551 	  for (i = 1; i < path->length (); i++)
1552 	    {
1553 	      if ((*path)[i]->e->src == bb->loop_father->header
1554 		  && (!loop_exit_edge_p (bb->loop_father, e2)
1555 		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1556 		break;
1557 	    }
1558 
1559 	  if (i != path->length ())
1560 	    continue;
1561 	}
1562 
1563       /* Insert the outgoing edge into the hash table if it is not
1564 	 already in the hash table.  */
1565       lookup_redirection_data (e, INSERT);
1566 
1567       /* When we have thread paths through a common joiner with different
1568 	 final destinations, we may need corrections to deal with
1569 	 profile insanities.  See the big comment before compute_path_counts.  */
1570       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1571 	{
1572 	  if (!last)
1573 	    last = e2;
1574 	  else if (e2 != last)
1575 	    local_info.need_profile_correction = true;
1576 	}
1577     }
1578 
1579   /* We do not update dominance info.  */
1580   free_dominance_info (CDI_DOMINATORS);
1581 
1582   /* We know we only thread through the loop header to loop exits.
1583      Let the basic block duplication hook know we are not creating
1584      a multiple entry loop.  */
1585   if (noloop_only
1586       && bb == bb->loop_father->header)
1587     set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1588 
1589   /* Now create duplicates of BB.
1590 
1591      Note that for a block with a high outgoing degree we can waste
1592      a lot of time and memory creating and destroying useless edges.
1593 
1594      So we first duplicate BB and remove the control structure at the
1595      tail of the duplicate as well as all outgoing edges from the
1596      duplicate.  We then use that duplicate block as a template for
1597      the rest of the duplicates.  */
1598   local_info.template_block = NULL;
1599   local_info.bb = bb;
1600   local_info.jumps_threaded = false;
1601   redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1602 			    (&local_info);
1603 
1604   /* The template does not have an outgoing edge.  Create that outgoing
1605      edge and update PHI nodes at the edge's target as necessary.
1606 
1607      We do this after creating all the duplicates to avoid creating
1608      unnecessary edges.  */
1609   redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1610 			    (&local_info);
1611 
1612   /* The hash table traversals above created the duplicate blocks (and the
1613      statements within the duplicate blocks).  This loop creates PHI nodes for
1614      the duplicated blocks and redirects the incoming edges into BB to reach
1615      the duplicates of BB.  */
1616   redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1617 			    (&local_info);
1618 
1619   /* Done with this block.  Clear REDIRECTION_DATA.  */
1620   delete redirection_data;
1621   redirection_data = NULL;
1622 
1623   if (noloop_only
1624       && bb == bb->loop_father->header)
1625     set_loop_copy (bb->loop_father, NULL);
1626 
1627   BITMAP_FREE (local_info.duplicate_blocks);
1628   local_info.duplicate_blocks = NULL;
1629 
1630   /* Indicate to our caller whether or not any jumps were threaded.  */
1631   return local_info.jumps_threaded;
1632 }
1633 
1634 /* Wrapper for thread_block_1 so that we can first handle jump
1635    thread paths which do not involve copying joiner blocks, then
1636    handle jump thread paths which have joiner blocks.
1637 
1638    By doing things this way we can be as aggressive as possible and
1639    not worry that copying a joiner block will create a jump threading
1640    opportunity.  */
1641 
1642 static bool
1643 thread_block (basic_block bb, bool noloop_only)
1644 {
1645   bool retval;
1646   retval = thread_block_1 (bb, noloop_only, false);
1647   retval |= thread_block_1 (bb, noloop_only, true);
1648   return retval;
1649 }
1650 
1651 /* Callback for dfs_enumerate_from.  Returns true if BB is different
1652    from STOP and DBDS_CE_STOP.  */
1653 
1654 static basic_block dbds_ce_stop;
1655 static bool
1656 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1657 {
1658   return (bb != (const_basic_block) stop
1659 	  && bb != dbds_ce_stop);
1660 }
1661 
1662 /* Evaluate the dominance relationship between BB and the latch of LOOP,
1663    and return the corresponding bb_dom_status.  */
1664 
1665 enum bb_dom_status
1666 determine_bb_domination_status (struct loop *loop, basic_block bb)
1667 {
1668   basic_block *bblocks;
1669   unsigned nblocks, i;
1670   bool bb_reachable = false;
1671   edge_iterator ei;
1672   edge e;
1673 
1674   /* This function assumes BB is a successor of LOOP->header.
1675      If that is not the case, return DOMST_NONDOMINATING, which
1676      is always safe.  */
1677     {
1678       bool ok = false;
1679 
1680       FOR_EACH_EDGE (e, ei, bb->preds)
1681 	{
1682      	  if (e->src == loop->header)
1683 	    {
1684 	      ok = true;
1685 	      break;
1686 	    }
1687 	}
1688 
1689       if (!ok)
1690 	return DOMST_NONDOMINATING;
1691     }
1692 
1693   if (bb == loop->latch)
1694     return DOMST_DOMINATING;
1695 
1696   /* Check that BB dominates LOOP->latch, and that it is back-reachable
1697      from it.  */
1698 
1699   bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1700   dbds_ce_stop = loop->header;
1701   nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1702 				bblocks, loop->num_nodes, bb);
1703   for (i = 0; i < nblocks; i++)
1704     FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1705       {
1706 	if (e->src == loop->header)
1707 	  {
1708 	    free (bblocks);
1709 	    return DOMST_NONDOMINATING;
1710 	  }
1711 	if (e->src == bb)
1712 	  bb_reachable = true;
1713       }
1714 
1715   free (bblocks);
1716   return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1717 }
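
/* For instance (schematic only): if the loop body splits at the header
   into two arms B and C that both reach the latch,

	header -> B -> latch
	header -> C -> latch

   then neither B nor C dominates the latch; the walk from the latch
   finds a block whose predecessor is the header without passing through
   BB, so DOMST_NONDOMINATING is returned for both.  */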
1718 
1719 /* Thread jumps through the header of LOOP.  Returns true if cfg changes.
1720    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1721    to the inside of the loop.  */
1722 
1723 static bool
1724 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1725 {
1726   basic_block header = loop->header;
1727   edge e, tgt_edge, latch = loop_latch_edge (loop);
1728   edge_iterator ei;
1729   basic_block tgt_bb, atgt_bb;
1730   enum bb_dom_status domst;
1731 
1732   /* We have already threaded through headers to exits, so all the threading
1733      requests now are to the inside of the loop.  We need to avoid creating
1734      irreducible regions (i.e., loops with more than one entry block), and
1735      also loops with several latch edges, or new subloops of the loop (although
1736      there are cases where it might be appropriate, it is difficult to decide,
1737      and doing it wrongly may confuse other optimizers).
1738 
1739      We could handle more general cases here.  However, the intention is to
1740      preserve some information about the loop, which is impossible if its
1741      structure changes significantly, in a way that is not well understood.
1742      Thus we only handle a few important special cases, in which updating
1743      the loop-carried information should also be feasible:
1744 
1745      1) Propagation of the latch edge to a block that dominates the latch block
1746 	of a loop.  This aims to handle the following idiom:
1747 
1748 	first = 1;
1749 	while (1)
1750 	  {
1751 	    if (first)
1752 	      initialize;
1753 	    first = 0;
1754 	    body;
1755 	  }
1756 
1757 	After threading the latch edge, this becomes
1758 
1759 	first = 1;
1760 	if (first)
1761 	  initialize;
1762 	while (1)
1763 	  {
1764 	    first = 0;
1765 	    body;
1766 	  }
1767 
1768 	The original header of the loop is moved out of it, and we may thread
1769 	the remaining edges through it without further constraints.
1770 
1771      2) All entry edges are propagated to a single basic block that dominates
1772 	the latch block of the loop.  This aims to handle the following idiom
1773 	(normally created for "for" loops):
1774 
1775 	i = 0;
1776 	while (1)
1777 	  {
1778 	    if (i >= 100)
1779 	      break;
1780 	    body;
1781 	    i++;
1782 	  }
1783 
1784 	This becomes
1785 
1786 	i = 0;
1787 	while (1)
1788 	  {
1789 	    body;
1790 	    i++;
1791 	    if (i >= 100)
1792 	      break;
1793 	  }
1794      */
1795 
1796   /* Threading through the header won't improve the code if the header has just
1797      one successor.  */
1798   if (single_succ_p (header))
1799     goto fail;
1800 
1801   if (!may_peel_loop_headers && !redirection_block_p (loop->header))
1802     goto fail;
1803   else
1804     {
1805       tgt_bb = NULL;
1806       tgt_edge = NULL;
1807       FOR_EACH_EDGE (e, ei, header->preds)
1808 	{
1809 	  if (!e->aux)
1810 	    {
1811 	      if (e == latch)
1812 		continue;
1813 
1814 	      /* If the latch is not threaded, and there is a header
1815 		 edge that is not threaded, we would create a loop
1816 		 with multiple entries.  */
1817 	      goto fail;
1818 	    }
1819 
1820 	  vec<jump_thread_edge *> *path = THREAD_PATH (e);
1821 
1822 	  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1823 	    goto fail;
1824 	  tgt_edge = (*path)[1]->e;
1825 	  atgt_bb = tgt_edge->dest;
1826 	  if (!tgt_bb)
1827 	    tgt_bb = atgt_bb;
1828 	  /* Two targets of threading would make us create a loop
1829 	     with multiple entries.  */
1830 	  else if (tgt_bb != atgt_bb)
1831 	    goto fail;
1832 	}
1833 
1834       if (!tgt_bb)
1835 	{
1836 	  /* There are no threading requests.  */
1837 	  return false;
1838 	}
1839 
1840       /* Redirecting to an empty loop latch is useless.  */
1841       if (tgt_bb == loop->latch
1842 	  && empty_block_p (loop->latch))
1843 	goto fail;
1844     }
1845 
1846   /* The target block must dominate the loop latch, otherwise we would be
1847      creating a subloop.  */
1848   domst = determine_bb_domination_status (loop, tgt_bb);
1849   if (domst == DOMST_NONDOMINATING)
1850     goto fail;
1851   if (domst == DOMST_LOOP_BROKEN)
1852     {
1853       /* If the loop ceased to exist, mark it as such, and thread through its
1854 	 original header.  */
1855       mark_loop_for_removal (loop);
1856       return thread_block (header, false);
1857     }
1858 
1859   if (tgt_bb->loop_father->header == tgt_bb)
1860     {
1861       /* If the target of the threading is a header of a subloop, we need
1862 	 to create a preheader for it, so that the headers of the two loops
1863 	 do not merge.  */
1864       if (EDGE_COUNT (tgt_bb->preds) > 2)
1865 	{
1866 	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1867 	  gcc_assert (tgt_bb != NULL);
1868 	}
1869       else
1870 	tgt_bb = split_edge (tgt_edge);
1871     }
1872 
1873   basic_block new_preheader;
1874 
1875   /* The entry edges will be redirected to the duplicate of the header,
1876      which becomes the new entry block.  Remember one entry edge, so that
1877      we can find the new preheader (its destination after threading).  */
1878   FOR_EACH_EDGE (e, ei, header->preds)
1879     {
1880       if (e->aux)
1881 	break;
1882     }
1883 
1884   /* The duplicate of the header is the new preheader of the loop.  Ensure
1885      that it is placed correctly in the loop hierarchy.  */
1886   set_loop_copy (loop, loop_outer (loop));
1887 
1888   thread_block (header, false);
1889   set_loop_copy (loop, NULL);
1890   new_preheader = e->dest;
1891 
1892   /* Create the new latch block.  This is always necessary, as the latch
1893      must have only a single successor, but the original header had at
1894      least two successors.  */
1895   loop->latch = NULL;
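  /* make_forwarder_block splits TGT_BB in two: the mfb_keep_just
     predicate sends every incoming edge except MFB_KJ_EDGE (the edge
     from the new preheader) to the forwarder part, which becomes the
     new latch (LATCH->src), while the rest of TGT_BB becomes the new
     header (LATCH->dest).  */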
1896   mfb_kj_edge = single_succ_edge (new_preheader);
1897   loop->header = mfb_kj_edge->dest;
1898   latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1899   loop->header = latch->dest;
1900   loop->latch = latch->src;
1901   return true;
1902 
1903 fail:
1904   /* We failed to thread anything.  Cancel the requests.  */
1905   FOR_EACH_EDGE (e, ei, header->preds)
1906     {
1907       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1908 
1909       if (path)
1910 	{
1911 	  delete_jump_thread_path (path);
1912 	  e->aux = NULL;
1913 	}
1914     }
1915   return false;
1916 }
1917 
1918 /* E1 and E2 are edges into the same basic block.  Return TRUE if the
1919    PHI arguments associated with those edges are equal or there are no
1920    PHI arguments, otherwise return FALSE.  */
1921 
1922 static bool
1923 phi_args_equal_on_edges (edge e1, edge e2)
1924 {
1925   gphi_iterator gsi;
1926   int indx1 = e1->dest_idx;
1927   int indx2 = e2->dest_idx;
1928 
1929   for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1930     {
1931       gphi *phi = gsi.phi ();
1932 
1933       if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1934 			    gimple_phi_arg_def (phi, indx2), 0))
1935 	return false;
1936     }
1937   return true;
1938 }
1939 
1940 /* Walk through the registered jump threads and convert them into a
1941    form convenient for this pass.
1942 
1943    Any block which has incoming edges threaded to outgoing edges
1944    will have its entry in THREADED_BLOCKS set.
1945 
1946    Any threaded edge will have its jump thread path stored in the
1947    original edge's AUX field.
1948 
1949    This form avoids the need to walk all the edges in the CFG to
1950    discover blocks which need processing and avoids unnecessary
1951    hash table lookups to map from threaded edge to new target.  */
1952 
1953 static void
1954 mark_threaded_blocks (bitmap threaded_blocks)
1955 {
1956   unsigned int i;
1957   bitmap_iterator bi;
1958   bitmap tmp = BITMAP_ALLOC (NULL);
1959   basic_block bb;
1960   edge e;
1961   edge_iterator ei;
1962 
1963   /* It is possible to have jump threads in which one is a subpath
1964      of the other, i.e., (A, B), (B, C), (C, D) where B is a joiner
1965      block and (B, C), (C, D) where no joiner block exists.
1966 
1967      When this occurs ignore the jump thread request with the joiner
1968      block.  It's totally subsumed by the simpler jump thread request.
1969 
1970      This results in less block copying and simpler CFGs.  More importantly,
1971      when we duplicate the joiner block, B, in this case we will create
1972      a new threading opportunity that we wouldn't be able to optimize
1973      until the next jump threading iteration.
1974 
1975      So first convert the jump thread requests which do not require a
1976      joiner block.  */
1977   for (i = 0; i < paths.length (); i++)
1978     {
1979       vec<jump_thread_edge *> *path = paths[i];
1980 
1981       if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1982 	{
1983 	  edge e = (*path)[0]->e;
1984 	  e->aux = (void *)path;
1985 	  bitmap_set_bit (tmp, e->dest->index);
1986 	}
1987     }
1988 
1989   /* Now iterate again, converting cases where we want to thread
1990      through a joiner block, but only if no other edge on the path
1991      already has a jump thread attached to it.  We do this in two passes,
1992      to avoid situations where the order in the paths vec can hide overlapping
1993      threads (the path is recorded on the incoming edge, so we would miss
1994      cases where the second path starts at a downstream edge on the same
1995      path).  First record all joiner paths, deleting any in the unexpected
1996      case where there is already a path for that incoming edge.  */
1997   for (i = 0; i < paths.length ();)
1998     {
1999       vec<jump_thread_edge *> *path = paths[i];
2000 
2001       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
2002 	{
2003 	  /* Attach the path to the starting edge if none is yet recorded.  */
2004 	  if ((*path)[0]->e->aux == NULL)
2005 	    {
2006 	      (*path)[0]->e->aux = path;
2007 	      i++;
2008 	    }
2009 	  else
2010 	    {
2011 	      paths.unordered_remove (i);
2012 	      if (dump_file && (dump_flags & TDF_DETAILS))
2013 		dump_jump_thread_path (dump_file, *path, false);
2014 	      delete_jump_thread_path (path);
2015 	    }
2016 	}
2017       else
2018 	{
2019 	  i++;
2020 	}
2021     }
2022 
2023   /* Second, look for paths that have any other jump thread attached to
2024      them, and either finish converting them or cancel them.  */
2025   for (i = 0; i < paths.length ();)
2026     {
2027       vec<jump_thread_edge *> *path = paths[i];
2028       edge e = (*path)[0]->e;
2029 
2030       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
2031 	{
2032 	  unsigned int j;
2033 	  for (j = 1; j < path->length (); j++)
2034 	    if ((*path)[j]->e->aux != NULL)
2035 	      break;
2036 
2037 	  /* If we iterated through the entire path without exiting the loop,
2038 	     then we are good to go, record it.  */
2039 	  if (j == path->length ())
2040 	    {
2041 	      bitmap_set_bit (tmp, e->dest->index);
2042 	      i++;
2043 	    }
2044 	  else
2045 	    {
2046 	      e->aux = NULL;
2047 	      paths.unordered_remove (i);
2048 	      if (dump_file && (dump_flags & TDF_DETAILS))
2049 		dump_jump_thread_path (dump_file, *path, false);
2050 	      delete_jump_thread_path (path);
2051 	    }
2052 	}
2053       else
2054 	{
2055 	  i++;
2056 	}
2057     }
2058 
2059   /* If optimizing for size, only thread through a block if we don't have
2060      to duplicate it or it's an otherwise empty redirection block.  */
2061   if (optimize_function_for_size_p (cfun))
2062     {
2063       EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2064 	{
2065 	  bb = BASIC_BLOCK_FOR_FN (cfun, i);
2066 	  if (EDGE_COUNT (bb->preds) > 1
2067 	      && !redirection_block_p (bb))
2068 	    {
2069 	      FOR_EACH_EDGE (e, ei, bb->preds)
2070 		{
2071 		  if (e->aux)
2072 		    {
2073 		      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2074 		      delete_jump_thread_path (path);
2075 		      e->aux = NULL;
2076 		    }
2077 		}
2078 	    }
2079 	  else
2080 	    bitmap_set_bit (threaded_blocks, i);
2081 	}
2082     }
2083   else
2084     bitmap_copy (threaded_blocks, tmp);
2085 
2086   /* Look for jump threading paths which cross multiple loop headers.
2087 
2088      The code to thread through loop headers will change the CFG in ways
2089      that break assumptions made by the loop optimization code.
2090 
2091      We don't want to blindly cancel the requests.  We can instead do better
2092      by trimming off the end of the jump thread path.  */
2093   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2094     {
2095       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2096       FOR_EACH_EDGE (e, ei, bb->preds)
2097 	{
2098 	  if (e->aux)
2099 	    {
2100 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2101 
2102 	      for (unsigned int i = 0, crossed_headers = 0;
2103 		   i < path->length ();
2104 		   i++)
2105 		{
2106 		  basic_block dest = (*path)[i]->e->dest;
2107 		  crossed_headers += (dest == dest->loop_father->header);
2108 		  if (crossed_headers > 1)
2109 		    {
2110 		      /* Trim from entry I onwards.  */
2111 		      for (unsigned int j = i; j < path->length (); j++)
2112 			delete (*path)[j];
2113 		      path->truncate (i);
2114 
2115 		      /* Now that we've truncated the path, make sure
2116 			 what's left is still valid.   We need at least
2117 			 two edges on the path and the last edge cannot
2118 			 be a joiner.  This should never happen, but let's
2119 			 be safe.  */
2120 		      if (path->length () < 2
2121 			  || (path->last ()->type
2122 			      == EDGE_COPY_SRC_JOINER_BLOCK))
2123 			{
2124 			  delete_jump_thread_path (path);
2125 			  e->aux = NULL;
2126 			}
2127 		      break;
2128 		    }
2129 		}
2130 	    }
2131 	}
2132     }
2133 
2134   /* If we have a joiner block (J) which has two successors S1 and S2 and
2135      we are threading through S1 and the final destination of the thread
2136      is S2, then we must verify that any PHI nodes in S2 have the same
2137      PHI arguments for the edge J->S2 and J->S1->...->S2.
2138 
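     Schematically:

	   J
	  / \
	 S1  \
	  \   \
	   \   \
	    v   v
	     S2

     The threaded path J->S1->...->S2 and the direct edge J->S2 both
     enter S2, so any PHI in S2 must see the same value on both routes
     for the transformation to be safe.
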
2139      We used to detect this prior to registering the jump thread, but
2140      that prohibits propagation of edge equivalences into non-dominated
2141      PHI nodes as the equivalency test might occur before propagation.
2142 
2143      This must also occur after we truncate any jump threading paths
2144      as this scenario may only show up after truncation.
2145 
2146      This works for now, but will need improvement as part of the FSA
2147      optimization.
2148 
2149      Note since we've moved the thread request data to the edges,
2150      we have to iterate on those rather than the threaded_edges vector.  */
2151   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2152     {
2153       bb = BASIC_BLOCK_FOR_FN (cfun, i);
2154       FOR_EACH_EDGE (e, ei, bb->preds)
2155 	{
2156 	  if (e->aux)
2157 	    {
2158 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2159 	      bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2160 
2161 	      if (have_joiner)
2162 		{
2163 		  basic_block joiner = e->dest;
2164 		  edge final_edge = path->last ()->e;
2165 		  basic_block final_dest = final_edge->dest;
2166 		  edge e2 = find_edge (joiner, final_dest);
2167 
2168 		  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2169 		    {
2170 		      delete_jump_thread_path (path);
2171 		      e->aux = NULL;
2172 		    }
2173 		}
2174 	    }
2175 	}
2176     }
2177 
2178   BITMAP_FREE (tmp);
2179 }
2180 
2181 
2182 /* Verify that the REGION is a valid jump thread.  A jump thread is a special
2183    case of a SEME (Single Entry Multiple Exits) region in which all nodes in
2184    the REGION have exactly one incoming edge.  The only exception is the first
2185    block, which may not have been connected to the rest of the cfg yet.  */
2186 
2187 DEBUG_FUNCTION void
2188 verify_jump_thread (basic_block *region, unsigned n_region)
2189 {
2190   for (unsigned i = 0; i < n_region; i++)
2191     gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2192 }
2193 
2194 /* Return true when BB is one of the first N items in BBS.  */
2195 
2196 static inline bool
2197 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2198 {
2199   for (int i = 0; i < n; i++)
2200     if (bb == bbs[i])
2201       return true;
2202 
2203   return false;
2204 }
2205 
2206 /* Duplicates a jump-thread path of N_REGION basic blocks.
2207    The ENTRY edge is redirected to the duplicate of the region.
2208 
2209    Remove the last conditional statement in the last basic block in the REGION,
2210    and create a single fallthru edge pointing to the same destination as the
2211    EXIT edge.
2212 
2213    The new basic blocks are stored to REGION_COPY in the same order as they had
2214    in REGION, provided that REGION_COPY is not NULL.
2215 
2216    Returns false if it is unable to copy the region, true otherwise.  */
2217 
2218 static bool
2219 duplicate_thread_path (edge entry, edge exit,
2220 		       basic_block *region, unsigned n_region,
2221 		       basic_block *region_copy)
2222 {
2223   unsigned i;
2224   bool free_region_copy = false;
2225   struct loop *loop = entry->dest->loop_father;
2226   edge exit_copy;
2227   edge redirected;
2228   int total_freq = 0, entry_freq = 0;
2229   gcov_type total_count = 0, entry_count = 0;
2230 
2231   if (!can_copy_bbs_p (region, n_region))
2232     return false;
2233 
2234   /* Some sanity checking.  Note that we do not check for all possible
2235      misuses of the function.  I.e., if you ask to copy something weird,
2236      it will work, but the state of the structures probably will not be
2237      correct.  */
2238   for (i = 0; i < n_region; i++)
2239     {
2240       /* We do not handle subloops, i.e. all the blocks must belong to the
2241 	 same loop.  */
2242       if (region[i]->loop_father != loop)
2243 	return false;
2244     }
2245 
2246   initialize_original_copy_tables ();
2247 
2248   set_loop_copy (loop, loop);
2249 
2250   if (!region_copy)
2251     {
2252       region_copy = XNEWVEC (basic_block, n_region);
2253       free_region_copy = true;
2254     }
2255 
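  /* Work out how much of ENTRY->dest's execution profile flows in along
     ENTRY, preferring real profile counts when they are available and
     falling back to the statically estimated frequencies otherwise.  */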
2256   if (entry->dest->count)
2257     {
2258       total_count = entry->dest->count;
2259       entry_count = entry->count;
2260       /* Fix up corner cases, to avoid division by zero or creation of negative
2261 	 frequencies.  */
2262       if (entry_count > total_count)
2263 	entry_count = total_count;
2264     }
2265   else
2266     {
2267       total_freq = entry->dest->frequency;
2268       entry_freq = EDGE_FREQUENCY (entry);
2269       /* Fix up corner cases, to avoid division by zero or creation of negative
2270 	 frequencies.  */
2271       if (total_freq == 0)
2272 	total_freq = 1;
2273       else if (entry_freq > total_freq)
2274 	entry_freq = total_freq;
2275     }
2276 
2277   copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2278 	    split_edge_bb_loc (entry), false);
2279 
2280   /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
2281      following code ensures that all the edges exiting the jump-thread path are
2282      redirected back to the original code: these edges are exceptions
2283      invalidating the property that is propagated by executing all the blocks of
2284      the jump-thread path in order.  */
2285 
2286   for (i = 0; i < n_region; i++)
2287     {
2288       edge e;
2289       edge_iterator ei;
2290       basic_block bb = region_copy[i];
2291 
2292       if (single_succ_p (bb))
2293 	{
2294 	  /* Make sure the successor is the next node in the path.  */
2295 	  gcc_assert (i + 1 == n_region
2296 		      || region_copy[i + 1] == single_succ_edge (bb)->dest);
2297 	  continue;
2298 	}
2299 
2300       /* Special case the last block on the path: make sure that it does not
2301 	 jump back on the copied path.  */
2302       if (i + 1 == n_region)
2303 	{
2304 	  FOR_EACH_EDGE (e, ei, bb->succs)
2305 	    if (bb_in_bbs (e->dest, region_copy, n_region - 1))
2306 	      {
2307 		basic_block orig = get_bb_original (e->dest);
2308 		if (orig)
2309 		  redirect_edge_and_branch_force (e, orig);
2310 	      }
2311 	  continue;
2312 	}
2313 
2314       /* Redirect all other edges jumping to non-adjacent blocks back to the
2315 	 original code.  */
2316       FOR_EACH_EDGE (e, ei, bb->succs)
2317 	if (region_copy[i + 1] != e->dest)
2318 	  {
2319 	    basic_block orig = get_bb_original (e->dest);
2320 	    if (orig)
2321 	      redirect_edge_and_branch_force (e, orig);
2322 	  }
2323     }
2324 
2325   if (total_count)
2326     {
2327       scale_bbs_frequencies_gcov_type (region, n_region,
2328 				       total_count - entry_count,
2329 				       total_count);
2330       scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
2331 				       total_count);
2332     }
2333   else
2334     {
2335       scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
2336 				 total_freq);
2337       scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
2338     }
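
  /* For example (made-up numbers): if ENTRY carries 30 of the 100
     counts entering its destination, the original region is scaled by
     70/100 and the copy by 30/100, so together they preserve the
     original profile.  */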
2339 
2340   if (flag_checking)
2341     verify_jump_thread (region_copy, n_region);
2342 
2343   /* Remove the last branch in the jump thread path.  */
2344   remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2345 
2346   /* And fixup the flags on the single remaining edge.  */
2347   edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2348   fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2349   fix_e->flags |= EDGE_FALLTHRU;
2350 
2351   edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
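  /* make_edge does not create a duplicate edge: when SRC and DEST are
     already connected, as they should be after the fixup above, it only
     ORs in the flags and returns NULL, so the guarded block below is a
     safety net for the unexpected case.  */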
2352 
2353   if (e)
2354     {
2355       rescan_loop_exit (e, true, false);
2356       e->probability = REG_BR_PROB_BASE;
2357       e->count = region_copy[n_region - 1]->count;
    }
2358 
2359   /* Redirect the entry and add the phi node arguments.  */
2360   if (entry->dest == loop->header)
2361     mark_loop_for_removal (loop);
2362   redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2363   gcc_assert (redirected != NULL);
2364   flush_pending_stmts (entry);
2365 
2366   /* Add the other PHI node arguments.  */
2367   add_phi_args_after_copy (region_copy, n_region, NULL);
2368 
2369   if (free_region_copy)
2370     free (region_copy);
2371 
2372   free_original_copy_tables ();
2373   return true;
2374 }
2375 
2376 /* Return true when PATH is a valid jump-thread path.  */
2377 
2378 static bool
2379 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2380 {
2381   unsigned len = path->length ();
2382 
2383   /* Check that the path is connected.  */
2384   for (unsigned int j = 0; j < len - 1; j++)
2385     {
2386       edge e = (*path)[j]->e;
2387       if (e->dest != (*path)[j+1]->e->src)
2388 	return false;
2389     }
2390   return true;
2391 }
2392 
2393 /* Remove any queued jump threads that include edge E.
2394 
2395    We don't actually remove them here, just record the edges into a
2396    hash table.  That way we can do the search once per iteration of
2397    DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR.  */
2398 
2399 void
2400 remove_jump_threads_including (edge_def *e)
2401 {
2402   if (!paths.exists ())
2403     return;
2404 
2405   if (!removed_edges)
2406     removed_edges = new hash_table<struct removed_edges> (17);
2407 
2408   edge *slot = removed_edges->find_slot (e, INSERT);
2409   *slot = e;
2410 }
2411 
2412 /* Walk through all blocks and thread incoming edges to the appropriate
2413    outgoing edge for each jump thread path recorded in PATHS.
2414 
2415    It is the caller's responsibility to fix the dominance information
2416    and rewrite duplicated SSA_NAMEs back into SSA form.
2417 
2418    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2419    loop headers if it does not simplify the loop.
2420 
2421    Returns true if one or more edges were threaded, false otherwise.  */
2422 
2423 bool
2424 thread_through_all_blocks (bool may_peel_loop_headers)
2425 {
2426   bool retval = false;
2427   unsigned int i;
2428   bitmap_iterator bi;
2429   bitmap threaded_blocks;
2430   struct loop *loop;
2431 
2432   if (!paths.exists ())
2433     {
2434       retval = false;
2435       goto out;
2436     }
2437 
2438   threaded_blocks = BITMAP_ALLOC (NULL);
2439   memset (&thread_stats, 0, sizeof (thread_stats));
2440 
2441   /* Remove any paths that referenced removed edges.  */
2442   if (removed_edges)
2443     for (i = 0; i < paths.length (); )
2444       {
2445 	unsigned int j;
2446 	vec<jump_thread_edge *> *path = paths[i];
2447 
2448 	for (j = 0; j < path->length (); j++)
2449 	  {
2450 	    edge e = (*path)[j]->e;
2451 	    if (removed_edges->find_slot (e, NO_INSERT))
2452 	      break;
2453 	  }
2454 
2455 	if (j != path->length ())
2456 	  {
2457 	    delete_jump_thread_path (path);
2458 	    paths.unordered_remove (i);
2459 	    continue;
2460 	  }
2461 	i++;
2462       }
2463 
2464   /* Jump-thread all FSM threads before other jump-threads.  */
2465   for (i = 0; i < paths.length ();)
2466     {
2467       vec<jump_thread_edge *> *path = paths[i];
2468       edge entry = (*path)[0]->e;
2469 
2470       /* Only code-generate FSM jump-threads in this loop.  */
2471       if ((*path)[0]->type != EDGE_FSM_THREAD)
2472 	{
2473 	  i++;
2474 	  continue;
2475 	}
2476 
2477       /* Do not jump-thread twice from the same block.  */
2478       if (bitmap_bit_p (threaded_blocks, entry->src->index)
2479 	  /* We may not want to realize this jump thread path
2480 	     for various reasons.  So check it first.  */
2481 	  || !valid_jump_thread_path (path))
2482 	{
2483 	  /* Remove invalid FSM jump-thread paths.  */
2484 	  delete_jump_thread_path (path);
2485 	  paths.unordered_remove (i);
2486 	  continue;
2487 	}
2488 
2489       unsigned len = path->length ();
2490       edge exit = (*path)[len - 1]->e;
2491       basic_block *region = XNEWVEC (basic_block, len - 1);
2492 
2493       for (unsigned int j = 0; j < len - 1; j++)
2494 	region[j] = (*path)[j]->e->dest;
2495 
2496       if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
2497 	{
2498 	  /* We do not update dominance info.  */
2499 	  free_dominance_info (CDI_DOMINATORS);
2500 	  bitmap_set_bit (threaded_blocks, entry->src->index);
2501 	  retval = true;
2502 	  thread_stats.num_threaded_edges++;
2503 	}
2504 
2505       delete_jump_thread_path (path);
2506       paths.unordered_remove (i);
2507       free (region);
2508     }
2509 
2510   /* Remove from PATHS all the jump-threads starting with an edge already
2511      jump-threaded.  */
2512   for (i = 0; i < paths.length ();)
2513     {
2514       vec<jump_thread_edge *> *path = paths[i];
2515       edge entry = (*path)[0]->e;
2516 
2517       /* Do not jump-thread twice from the same block.  */
2518       if (bitmap_bit_p (threaded_blocks, entry->src->index))
2519 	{
2520 	  delete_jump_thread_path (path);
2521 	  paths.unordered_remove (i);
2522 	}
2523       else
2524 	i++;
2525     }
2526 
2527   bitmap_clear (threaded_blocks);
2528 
2529   mark_threaded_blocks (threaded_blocks);
2530 
2531   initialize_original_copy_tables ();
2532 
2533   /* First perform the threading requests that do not affect
2534      loop structure.  */
2535   EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
2536     {
2537       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2538 
2539       if (EDGE_COUNT (bb->preds) > 0)
2540 	retval |= thread_block (bb, true);
2541     }
2542 
2543   /* Then perform the threading through loop headers.  We start with the
2544      innermost loop, so that the changes in cfg we perform won't affect
2545      further threading.  */
2546   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2547     {
2548       if (!loop->header
2549 	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
2550 	continue;
2551 
2552       retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2553     }
2554 
2555   /* Any jump threading paths that are still attached to edges at this
2556      point must fall into one of two cases.
2557 
2558      First, we could have a jump threading path which went from outside
2559      a loop to inside a loop that was ignored because a prior jump thread
2560      across a backedge was realized (which indirectly causes the loop
2561      above to ignore the latter thread).  We can detect these because the
2562      loop structures will be different and we do not currently try to
2563      optimize this case.
2564 
2565      Second, we could be threading across a backedge to a point within the
2566      same loop.  This occurs for the FSA/FSM optimization and we would
2567      like to optimize it.  However, we have to be very careful as this
2568      may completely scramble the loop structures, with the result being
2569      irreducible loops causing us to throw away our loop structure.
2570 
2571      As a compromise for the latter case, paths ending in a block whose
2572      last statement is a multiway branch have already been realized as
2573      FSM threads above; anything still attached here is simply cancelled.  */
2574   basic_block bb;
2575   edge e;
2576   FOR_EACH_BB_FN (bb, cfun)
2577     {
2578       /* If we do end up threading here, we can remove elements from
2579 	 BB->preds.  Thus we cannot use the FOR_EACH_EDGE iterator.  */
2580       for (edge_iterator ei = ei_start (bb->preds);
2581 	   (e = ei_safe_edge (ei));)
2582 	if (e->aux)
2583 	  {
2584 	    vec<jump_thread_edge *> *path = THREAD_PATH (e);
2585 
2586 	    /* Case 1, threading from outside to inside the loop
2587 	       after we'd already threaded through the header.  */
2588 	    if ((*path)[0]->e->dest->loop_father
2589 		!= path->last ()->e->src->loop_father)
2590 	      {
2591 		delete_jump_thread_path (path);
2592 		e->aux = NULL;
2593 		ei_next (&ei);
2594 	      }
2595 	    else
2596 	      {
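		/* Case 2, threading across the loop backedge; these
		   were handled above, so just cancel the request.  */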
2597 		delete_jump_thread_path (path);
2598 		e->aux = NULL;
2599 		ei_next (&ei);
2600 	      }
2601 	  }
2602 	else
2603 	  ei_next (&ei);
2604     }
2605 
2606   statistics_counter_event (cfun, "Jumps threaded",
2607 			    thread_stats.num_threaded_edges);
2608 
2609   free_original_copy_tables ();
2610 
2611   BITMAP_FREE (threaded_blocks);
2612   threaded_blocks = NULL;
2613   paths.release ();
2614 
2615   if (retval)
2616     loops_state_set (LOOPS_NEED_FIXUP);
2617 
2618  out:
2619   delete removed_edges;
2620   removed_edges = NULL;
2621   return retval;
2622 }
2623 
2624 /* Delete the jump threading path PATH.  We have to explicitly delete
2625    each entry in the vector, then the container.  */
2626 
2627 void
2628 delete_jump_thread_path (vec<jump_thread_edge *> *path)
2629 {
2630   for (unsigned int i = 0; i < path->length (); i++)
2631     delete (*path)[i];
2632   path->release ();
2633   delete path;
2634 }
2635 
2636 /* Register a jump threading opportunity.  We queue up all the jump
2637    threading opportunities discovered by a pass and update the CFG
2638    and SSA form all at once.
2639 
2640    PATH describes the edges to thread: we are effectively recording
2641    that the destination of its first edge can be changed to the
2642    destination of its last edge after fixing the SSA graph.  */
2643 
2644 void
2645 register_jump_thread (vec<jump_thread_edge *> *path)
2646 {
2647   if (!dbg_cnt (registered_jump_thread))
2648     {
2649       delete_jump_thread_path (path);
2650       return;
2651     }
2652 
2653   /* First make sure there are no NULL outgoing edges on the jump threading
2654      path.  That can happen for jumping to a constant address.  */
2655   for (unsigned int i = 0; i < path->length (); i++)
2656     {
2657       if ((*path)[i]->e == NULL)
2658 	{
2659 	  if (dump_file && (dump_flags & TDF_DETAILS))
2660 	    {
2661 	      fprintf (dump_file,
2662 		       "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
2663 	      dump_jump_thread_path (dump_file, *path, false);
2664 	    }
2665 
2666 	  delete_jump_thread_path (path);
2667 	  return;
2668 	}
2669 
2670       /* Only the FSM threader is allowed to thread across
2671 	 backedges in the CFG.  */
2672       if (flag_checking
2673 	  && (*path)[0]->type != EDGE_FSM_THREAD)
2674 	gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
2675     }
2676 
2677   if (dump_file && (dump_flags & TDF_DETAILS))
2678     dump_jump_thread_path (dump_file, *path, true);
2679 
2680   if (!paths.exists ())
2681     paths.create (5);
2682 
2683   paths.safe_push (path);
2684 }
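
/* A minimal caller-side sketch (hypothetical edges E and E2; the real
   callers live in tree-ssa-threadedge.c):

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   Ownership of PATH and its entries transfers to this file, which
   releases them with delete_jump_thread_path.  */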
2685