xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-threadupdate.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2    Copyright (C) 2004-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "flags.h"
26 #include "tm_p.h"
27 #include "basic-block.h"
28 #include "function.h"
29 #include "tree-flow.h"
30 #include "dumpfile.h"
31 #include "cfgloop.h"
32 #include "hash-table.h"
33 
34 /* Given a block B, update the CFG and SSA graph to reflect redirecting
35    one or more in-edges to B to instead reach the destination of an
36    out-edge from B while preserving any side effects in B.
37 
38    i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
39    side effects of executing B.
40 
41      1. Make a copy of B (including its outgoing edges and statements).  Call
42 	the copy B'.  Note B' has no incoming edges or PHIs at this time.
43 
44      2. Remove the control statement at the end of B' and all outgoing edges
45 	except B'->C.
46 
47      3. Add a new argument to each PHI in C with the same value as the existing
48 	argument associated with edge B->C.  Associate the new PHI arguments
49 	with the edge B'->C.
50 
51      4. For each PHI in B, find or create a PHI in B' with an identical
52 	PHI_RESULT.  Add an argument to the PHI in B' which has the same
53 	value as the PHI in B associated with the edge A->B.  Associate
54 	the new argument in the PHI in B' with the edge A->B.
55 
56      5. Change the edge A->B to A->B'.
57 
58 	5a. This automatically deletes any PHI arguments associated with the
59 	    edge A->B in B.
60 
61 	5b. This automatically associates each new argument added in step 4
62 	    with the edge A->B'.
63 
64      6. Repeat for other incoming edges into B.
65 
66      7. Put the duplicated resources in B and all the B' blocks into SSA form.
67 
68    Note that block duplication can be minimized by first collecting the
69    set of unique destination blocks that the incoming edges should
70    be threaded to.
71 
72    Block duplication can be further minimized by using B instead of
73    creating B' for one destination if all edges into B are going to be
74    threaded to a successor of B.  We had code to do this at one time, but
75    I'm not convinced it is correct with the changes to avoid mucking up
76    the loop structure (which may cancel threading requests, thus a block
77    which we thought was going to become unreachable may still be reachable).
78    This code was also going to get ugly with the introduction of the ability
79    for a single jump thread request to bypass multiple blocks.
80 
81    We further reduce the number of edges and statements we create by
82    not copying all the outgoing edges and the control statement in
83    step #1.  We instead create a template block without the outgoing
84    edges and duplicate the template.  */
85 
86 
87 /* Steps #5 and #6 of the above algorithm are best implemented by walking
88    all the incoming edges which thread to the same destination edge at
89    the same time.  That avoids lots of table lookups to get information
90    for the destination edge.
91 
92    To realize that implementation we create a list of incoming edges
93    which thread to the same outgoing edge.  Thus to implement steps
94    #5 and #6 we traverse our hash table of outgoing edge information.
95    For each entry we walk the list of incoming edges which thread to
96    the current outgoing edge.  */
97 
98 struct el
99 {
100   edge e;
101   struct el *next;
102 };
103 
104 /* Main data structure recording information regarding B's duplicate
105    blocks.  */
106 
107 /* We need to efficiently record the unique thread destinations of this
108    block and specific information associated with those destinations.  We
109    may have many incoming edges threaded to the same outgoing edge.  This
110    can be naturally implemented with a hash table.  */
111 
112 struct redirection_data : typed_free_remove<redirection_data>
113 {
114   /* A duplicate of B with the trailing control statement removed and which
115      targets a single successor of B.  */
116   basic_block dup_block;
117 
118   /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
119      its single successor.  */
120   edge outgoing_edge;
121 
122   edge intermediate_edge;
123 
124   /* A list of incoming edges which we want to thread to
125      OUTGOING_EDGE->dest.  */
126   struct el *incoming_edges;
127 
128   /* hash_table support.  */
129   typedef redirection_data value_type;
130   typedef redirection_data compare_type;
131   static inline hashval_t hash (const value_type *);
132   static inline int equal (const value_type *, const compare_type *);
133 };
134 
135 inline hashval_t
136 redirection_data::hash (const value_type *p)
137 {
138   edge e = p->outgoing_edge;
139   return e->dest->index;
140 }
141 
142 inline int
143 redirection_data::equal (const value_type *p1, const compare_type *p2)
144 {
145   edge e1 = p1->outgoing_edge;
146   edge e2 = p2->outgoing_edge;
147   edge e3 = p1->intermediate_edge;
148   edge e4 = p2->intermediate_edge;
149   return e1 == e2 && e3 == e4;
150 }
151 
152 /* Data structure of information to pass to hash table traversal routines.  */
153 struct ssa_local_info_t
154 {
155   /* The current block we are working on.  */
156   basic_block bb;
157 
158   /* A template copy of BB with no outgoing edges or control statement that
159      we use for creating copies.  */
160   basic_block template_block;
161 
162   /* TRUE if we thread one or more jumps, FALSE otherwise.  */
163   bool jumps_threaded;
164 };
165 
166 /* Passes which use the jump threading code register jump threading
167    opportunities as they are discovered.  We keep the registered
168    jump threading opportunities in this vector as edge pairs
169    (original_edge, target_edge).  */
170 static vec<edge> threaded_edges;
171 
172 /* When we start updating the CFG for threading, data necessary for jump
173    threading is attached to the AUX field for the incoming edge.  Use these
174    macros to access the underlying structure attached to the AUX field.  */
175 #define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
176 #define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]
177 
178 /* Jump threading statistics.  */
179 
180 struct thread_stats_d
181 {
182   unsigned long num_threaded_edges;
183 };
184 
185 struct thread_stats_d thread_stats;
186 
187 
188 /* Remove the last statement in block BB if it is a control statement
189    Also remove all outgoing edges except the edge which reaches DEST_BB.
190    If DEST_BB is NULL, then remove all outgoing edges.  */
191 
192 static void
193 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
194 {
195   gimple_stmt_iterator gsi;
196   edge e;
197   edge_iterator ei;
198 
199   gsi = gsi_last_bb (bb);
200 
201   /* If the duplicate ends with a control statement, then remove it.
202 
203      Note that if we are duplicating the template block rather than the
204      original basic block, then the duplicate might not have any real
205      statements in it.  */
206   if (!gsi_end_p (gsi)
207       && gsi_stmt (gsi)
208       && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
209 	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
210 	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
211     gsi_remove (&gsi, true);
212 
213   for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
214     {
215       if (e->dest != dest_bb)
216 	remove_edge (e);
217       else
218 	ei_next (&ei);
219     }
220 }
221 
222 /* Create a duplicate of BB.  Record the duplicate block in RD.  */
223 
224 static void
225 create_block_for_threading (basic_block bb, struct redirection_data *rd)
226 {
227   edge_iterator ei;
228   edge e;
229 
230   /* We can use the generic block duplication code and simply remove
231      the stuff we do not need.  */
232   rd->dup_block = duplicate_block (bb, NULL, NULL);
233 
234   FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
235     e->aux = NULL;
236 
237   /* Zero out the profile, since the block is unreachable for now.  */
238   rd->dup_block->frequency = 0;
239   rd->dup_block->count = 0;
240 }
241 
242 /* Main data structure to hold information for duplicates of BB.  */
243 
244 static hash_table <redirection_data> redirection_data;
245 
246 /* Given an outgoing edge E lookup and return its entry in our hash table.
247 
248    If INSERT is true, then we insert the entry into the hash table if
249    it is not already present.  INCOMING_EDGE is added to the list of incoming
250    edges associated with E in the hash table.  */
251 
252 static struct redirection_data *
253 lookup_redirection_data (edge e, enum insert_option insert)
254 {
255   struct redirection_data **slot;
256   struct redirection_data *elt;
257 
258  /* Build a hash table element so we can see if E is already
259      in the table.  */
260   elt = XNEW (struct redirection_data);
261   elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
262   elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
263 					  : THREAD_TARGET (e);
264   elt->dup_block = NULL;
265   elt->incoming_edges = NULL;
266 
267   slot = redirection_data.find_slot (elt, insert);
268 
269   /* This will only happen if INSERT is false and the entry is not
270      in the hash table.  */
271   if (slot == NULL)
272     {
273       free (elt);
274       return NULL;
275     }
276 
277   /* This will only happen if E was not in the hash table and
278      INSERT is true.  */
279   if (*slot == NULL)
280     {
281       *slot = elt;
282       elt->incoming_edges = XNEW (struct el);
283       elt->incoming_edges->e = e;
284       elt->incoming_edges->next = NULL;
285       return elt;
286     }
287   /* E was in the hash table.  */
288   else
289     {
290       /* Free ELT as we do not need it anymore, we will extract the
291 	 relevant entry from the hash table itself.  */
292       free (elt);
293 
294       /* Get the entry stored in the hash table.  */
295       elt = *slot;
296 
297       /* If insertion was requested, then we need to add INCOMING_EDGE
298 	 to the list of incoming edges associated with E.  */
299       if (insert)
300 	{
301           struct el *el = XNEW (struct el);
302 	  el->next = elt->incoming_edges;
303 	  el->e = e;
304 	  elt->incoming_edges = el;
305 	}
306 
307       return elt;
308     }
309 }
310 
311 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.  */
312 
313 static void
314 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
315 {
316   gimple_stmt_iterator gsi;
317   int src_indx = src_e->dest_idx;
318 
319   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
320     {
321       gimple phi = gsi_stmt (gsi);
322       source_location locus = gimple_phi_arg_location (phi, src_indx);
323       add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
324     }
325 }
326 
327 /* We have recently made a copy of ORIG_BB, including its outgoing
328    edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
329    ORIG_BB has a new argument associated with edge from NEW_BB to the
330    successor.  Initialize the PHI argument so that it is equal to the PHI
331    argument associated with the edge from ORIG_BB to the successor.  */
332 
333 static void
334 update_destination_phis (basic_block orig_bb, basic_block new_bb)
335 {
336   edge_iterator ei;
337   edge e;
338 
339   FOR_EACH_EDGE (e, ei, orig_bb->succs)
340     {
341       edge e2 = find_edge (new_bb, e->dest);
342       copy_phi_args (e->dest, e, e2);
343     }
344 }
345 
346 /* Given a duplicate block and its single destination (both stored
347    in RD).  Create an edge between the duplicate and its single
348    destination.
349 
350    Add an additional argument to any PHI nodes at the single
351    destination.  */
352 
353 static void
354 create_edge_and_update_destination_phis (struct redirection_data *rd,
355 					 basic_block bb)
356 {
357   edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);
358 
359   rescan_loop_exit (e, true, false);
360   e->probability = REG_BR_PROB_BASE;
361   e->count = bb->count;
362 
363   if (rd->outgoing_edge->aux)
364     {
365       e->aux = XNEWVEC (edge, 2);
366       THREAD_TARGET(e) = THREAD_TARGET (rd->outgoing_edge);
367       THREAD_TARGET2(e) = THREAD_TARGET2 (rd->outgoing_edge);
368     }
369   else
370     {
371       e->aux = NULL;
372     }
373 
374   /* If there are any PHI nodes at the destination of the outgoing edge
375      from the duplicate block, then we will need to add a new argument
376      to them.  The argument should have the same value as the argument
377      associated with the outgoing edge stored in RD.  */
378   copy_phi_args (e->dest, rd->outgoing_edge, e);
379 }
380 
381 /* Wire up the outgoing edges from the duplicate block and
382    update any PHIs as needed.  */
383 void
384 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
385 			       ssa_local_info_t *local_info)
386 {
387   /* If we were threading through an joiner block, then we want
388      to keep its control statement and redirect an outgoing edge.
389      Else we want to remove the control statement & edges, then create
390      a new outgoing edge.  In both cases we may need to update PHIs.  */
391   if (THREAD_TARGET2 (rd->incoming_edges->e))
392     {
393       edge victim;
394       edge e2;
395       edge e = rd->incoming_edges->e;
396 
397       /* This updates the PHIs at the destination of the duplicate
398 	 block.  */
399       update_destination_phis (local_info->bb, rd->dup_block);
400 
401       /* Find the edge from the duplicate block to the block we're
402 	 threading through.  That's the edge we want to redirect.  */
403       victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
404       e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);
405 
406       /* If we redirected the edge, then we need to copy PHI arguments
407 	 at the target.  If the edge already existed (e2 != victim case),
408 	 then the PHIs in the target already have the correct arguments.  */
409       if (e2 == victim)
410 	copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
411     }
412   else
413     {
414       remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
415       create_edge_and_update_destination_phis (rd, rd->dup_block);
416     }
417 }
418 /* Hash table traversal callback routine to create duplicate blocks.  */
419 
420 int
421 ssa_create_duplicates (struct redirection_data **slot,
422 		       ssa_local_info_t *local_info)
423 {
424   struct redirection_data *rd = *slot;
425 
426   /* Create a template block if we have not done so already.  Otherwise
427      use the template to create a new block.  */
428   if (local_info->template_block == NULL)
429     {
430       create_block_for_threading (local_info->bb, rd);
431       local_info->template_block = rd->dup_block;
432 
433       /* We do not create any outgoing edges for the template.  We will
434 	 take care of that in a later traversal.  That way we do not
435 	 create edges that are going to just be deleted.  */
436     }
437   else
438     {
439       create_block_for_threading (local_info->template_block, rd);
440 
441       /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
442 	 block.   */
443       ssa_fix_duplicate_block_edges (rd, local_info);
444     }
445 
446   /* Keep walking the hash table.  */
447   return 1;
448 }
449 
450 /* We did not create any outgoing edges for the template block during
451    block creation.  This hash table traversal callback creates the
452    outgoing edge for the template block.  */
453 
454 inline int
455 ssa_fixup_template_block (struct redirection_data **slot,
456 			  ssa_local_info_t *local_info)
457 {
458   struct redirection_data *rd = *slot;
459 
460   /* If this is the template block halt the traversal after updating
461      it appropriately.
462 
463      If we were threading through an joiner block, then we want
464      to keep its control statement and redirect an outgoing edge.
465      Else we want to remove the control statement & edges, then create
466      a new outgoing edge.  In both cases we may need to update PHIs.  */
467   if (rd->dup_block && rd->dup_block == local_info->template_block)
468     {
469       ssa_fix_duplicate_block_edges (rd, local_info);
470       return 0;
471     }
472 
473   return 1;
474 }
475 
476 /* Hash table traversal callback to redirect each incoming edge
477    associated with this hash table element to its new destination.  */
478 
479 int
480 ssa_redirect_edges (struct redirection_data **slot,
481 		    ssa_local_info_t *local_info)
482 {
483   struct redirection_data *rd = *slot;
484   struct el *next, *el;
485 
486   /* Walk over all the incoming edges associated associated with this
487      hash table entry.  */
488   for (el = rd->incoming_edges; el; el = next)
489     {
490       edge e = el->e;
491 
492       /* Go ahead and free this element from the list.  Doing this now
493 	 avoids the need for another list walk when we destroy the hash
494 	 table.  */
495       next = el->next;
496       free (el);
497 
498       thread_stats.num_threaded_edges++;
499       /* If we are threading through a joiner block, then we have to
500 	 find the edge we want to redirect and update some PHI nodes.  */
501       if (THREAD_TARGET2 (e))
502 	{
503 	  edge e2;
504 
505 	  /* We want to redirect the incoming edge to the joiner block (E)
506 	     to instead reach the duplicate of the joiner block.  */
507 	  e2 = redirect_edge_and_branch (e, rd->dup_block);
508 	  flush_pending_stmts (e2);
509 	}
510       else if (rd->dup_block)
511 	{
512 	  edge e2;
513 
514 	  if (dump_file && (dump_flags & TDF_DETAILS))
515 	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
516 		     e->src->index, e->dest->index, rd->dup_block->index);
517 
518 	  rd->dup_block->count += e->count;
519 
520 	  /* Excessive jump threading may make frequencies large enough so
521 	     the computation overflows.  */
522 	  if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
523 	    rd->dup_block->frequency += EDGE_FREQUENCY (e);
524 	  EDGE_SUCC (rd->dup_block, 0)->count += e->count;
525 	  /* Redirect the incoming edge to the appropriate duplicate
526 	     block.  */
527 	  e2 = redirect_edge_and_branch (e, rd->dup_block);
528 	  gcc_assert (e == e2);
529 	  flush_pending_stmts (e2);
530 	}
531 
532       /* Go ahead and clear E->aux.  It's not needed anymore and failure
533          to clear it will cause all kinds of unpleasant problems later.  */
534       free (e->aux);
535       e->aux = NULL;
536 
537     }
538 
539   /* Indicate that we actually threaded one or more jumps.  */
540   if (rd->incoming_edges)
541     local_info->jumps_threaded = true;
542 
543   return 1;
544 }
545 
546 /* Return true if this block has no executable statements other than
547    a simple ctrl flow instruction.  When the number of outgoing edges
548    is one, this is equivalent to a "forwarder" block.  */
549 
550 static bool
551 redirection_block_p (basic_block bb)
552 {
553   gimple_stmt_iterator gsi;
554 
555   /* Advance to the first executable statement.  */
556   gsi = gsi_start_bb (bb);
557   while (!gsi_end_p (gsi)
558          && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
559 	     || is_gimple_debug (gsi_stmt (gsi))
560              || gimple_nop_p (gsi_stmt (gsi))))
561     gsi_next (&gsi);
562 
563   /* Check if this is an empty block.  */
564   if (gsi_end_p (gsi))
565     return true;
566 
567   /* Test that we've reached the terminating control statement.  */
568   return gsi_stmt (gsi)
569          && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
570              || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
571              || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
572 }
573 
574 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
575    is reached via one or more specific incoming edges, we know which
576    outgoing edge from BB will be traversed.
577 
578    We want to redirect those incoming edges to the target of the
579    appropriate outgoing edge.  Doing so avoids a conditional branch
580    and may expose new optimization opportunities.  Note that we have
581    to update dominator tree and SSA graph after such changes.
582 
583    The key to keeping the SSA graph update manageable is to duplicate
584    the side effects occurring in BB so that those side effects still
585    occur on the paths which bypass BB after redirecting edges.
586 
587    We accomplish this by creating duplicates of BB and arranging for
588    the duplicates to unconditionally pass control to one specific
589    successor of BB.  We then revector the incoming edges into BB to
590    the appropriate duplicate of BB.
591 
592    If NOLOOP_ONLY is true, we only perform the threading as long as it
593    does not affect the structure of the loops in a nontrivial way.  */
594 
595 static bool
596 thread_block (basic_block bb, bool noloop_only)
597 {
598   /* E is an incoming edge into BB that we may or may not want to
599      redirect to a duplicate of BB.  */
600   edge e, e2;
601   edge_iterator ei;
602   ssa_local_info_t local_info;
603   struct loop *loop = bb->loop_father;
604 
605   /* To avoid scanning a linear array for the element we need we instead
606      use a hash table.  For normal code there should be no noticeable
607      difference.  However, if we have a block with a large number of
608      incoming and outgoing edges such linear searches can get expensive.  */
609   redirection_data.create (EDGE_COUNT (bb->succs));
610 
611   /* If we thread the latch of the loop to its exit, the loop ceases to
612      exist.  Make sure we do not restrict ourselves in order to preserve
613      this loop.  */
614   if (loop->header == bb)
615     {
616       e = loop_latch_edge (loop);
617 
618       if (e->aux)
619 	e2 = THREAD_TARGET (e);
620       else
621 	e2 = NULL;
622 
623       if (e2 && loop_exit_edge_p (loop, e2))
624 	{
625 	  loop->header = NULL;
626 	  loop->latch = NULL;
627 	  loops_state_set (LOOPS_NEED_FIXUP);
628 	}
629     }
630 
631   /* Record each unique threaded destination into a hash table for
632      efficient lookups.  */
633   FOR_EACH_EDGE (e, ei, bb->preds)
634     {
635       if (e->aux == NULL)
636 	continue;
637 
638       if (THREAD_TARGET2 (e))
639 	e2 = THREAD_TARGET2 (e);
640       else
641 	e2 = THREAD_TARGET (e);
642 
643       if (!e2
644 	  /* If NOLOOP_ONLY is true, we only allow threading through the
645 	     header of a loop to exit edges.  */
646 	  || (noloop_only
647 	      && bb == bb->loop_father->header
648 	      && (!loop_exit_edge_p (bb->loop_father, e2)
649 		  || THREAD_TARGET2 (e))))
650 	continue;
651 
652       if (e->dest == e2->src)
653 	update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
654 				         e->count, THREAD_TARGET (e));
655 
656       /* Insert the outgoing edge into the hash table if it is not
657 	 already in the hash table.  */
658       lookup_redirection_data (e, INSERT);
659     }
660 
661   /* We do not update dominance info.  */
662   free_dominance_info (CDI_DOMINATORS);
663 
664   /* We know we only thread through the loop header to loop exits.
665      Let the basic block duplication hook know we are not creating
666      a multiple entry loop.  */
667   if (noloop_only
668       && bb == bb->loop_father->header)
669     set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
670 
671   /* Now create duplicates of BB.
672 
673      Note that for a block with a high outgoing degree we can waste
674      a lot of time and memory creating and destroying useless edges.
675 
676      So we first duplicate BB and remove the control structure at the
677      tail of the duplicate as well as all outgoing edges from the
678      duplicate.  We then use that duplicate block as a template for
679      the rest of the duplicates.  */
680   local_info.template_block = NULL;
681   local_info.bb = bb;
682   local_info.jumps_threaded = false;
683   redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
684 			    (&local_info);
685 
686   /* The template does not have an outgoing edge.  Create that outgoing
687      edge and update PHI nodes as the edge's target as necessary.
688 
689      We do this after creating all the duplicates to avoid creating
690      unnecessary edges.  */
691   redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
692 			    (&local_info);
693 
694   /* The hash table traversals above created the duplicate blocks (and the
695      statements within the duplicate blocks).  This loop creates PHI nodes for
696      the duplicated blocks and redirects the incoming edges into BB to reach
697      the duplicates of BB.  */
698   redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
699 			    (&local_info);
700 
701   /* Done with this block.  Clear REDIRECTION_DATA.  */
702   redirection_data.dispose ();
703 
704   if (noloop_only
705       && bb == bb->loop_father->header)
706     set_loop_copy (bb->loop_father, NULL);
707 
708   /* Indicate to our caller whether or not any jumps were threaded.  */
709   return local_info.jumps_threaded;
710 }
711 
712 /* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
713    copy of E->dest created during threading, or E->dest if it was not necessary
714    to copy it (E is its single predecessor).  */
715 
716 static basic_block
717 thread_single_edge (edge e)
718 {
719   basic_block bb = e->dest;
720   edge eto = THREAD_TARGET (e);
721   struct redirection_data rd;
722 
723   free (e->aux);
724   e->aux = NULL;
725 
726   thread_stats.num_threaded_edges++;
727 
728   if (single_pred_p (bb))
729     {
730       /* If BB has just a single predecessor, we should only remove the
731 	 control statements at its end, and successors except for ETO.  */
732       remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
733 
734       /* And fixup the flags on the single remaining edge.  */
735       eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
736       eto->flags |= EDGE_FALLTHRU;
737 
738       return bb;
739     }
740 
741   /* Otherwise, we need to create a copy.  */
742   if (e->dest == eto->src)
743     update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
744 
745   rd.outgoing_edge = eto;
746 
747   create_block_for_threading (bb, &rd);
748   remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
749   create_edge_and_update_destination_phis (&rd, rd.dup_block);
750 
751   if (dump_file && (dump_flags & TDF_DETAILS))
752     fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
753 	     e->src->index, e->dest->index, rd.dup_block->index);
754 
755   rd.dup_block->count = e->count;
756   rd.dup_block->frequency = EDGE_FREQUENCY (e);
757   single_succ_edge (rd.dup_block)->count = e->count;
758   redirect_edge_and_branch (e, rd.dup_block);
759   flush_pending_stmts (e);
760 
761   return rd.dup_block;
762 }
763 
764 /* Callback for dfs_enumerate_from.  Returns true if BB is different
765    from STOP and DBDS_CE_STOP.  */
766 
767 static basic_block dbds_ce_stop;
768 static bool
769 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
770 {
771   return (bb != (const_basic_block) stop
772 	  && bb != dbds_ce_stop);
773 }
774 
775 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
776    returns the state.  */
777 
778 enum bb_dom_status
779 {
780   /* BB does not dominate latch of the LOOP.  */
781   DOMST_NONDOMINATING,
782   /* The LOOP is broken (there is no path from the header to its latch.  */
783   DOMST_LOOP_BROKEN,
784   /* BB dominates the latch of the LOOP.  */
785   DOMST_DOMINATING
786 };
787 
788 static enum bb_dom_status
789 determine_bb_domination_status (struct loop *loop, basic_block bb)
790 {
791   basic_block *bblocks;
792   unsigned nblocks, i;
793   bool bb_reachable = false;
794   edge_iterator ei;
795   edge e;
796 
797   /* This function assumes BB is a successor of LOOP->header.
798      If that is not the case return DOMST_NONDOMINATING which
799      is always safe.  */
800     {
801       bool ok = false;
802 
803       FOR_EACH_EDGE (e, ei, bb->preds)
804 	{
805      	  if (e->src == loop->header)
806 	    {
807 	      ok = true;
808 	      break;
809 	    }
810 	}
811 
812       if (!ok)
813 	return DOMST_NONDOMINATING;
814     }
815 
816   if (bb == loop->latch)
817     return DOMST_DOMINATING;
818 
819   /* Check that BB dominates LOOP->latch, and that it is back-reachable
820      from it.  */
821 
822   bblocks = XCNEWVEC (basic_block, loop->num_nodes);
823   dbds_ce_stop = loop->header;
824   nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
825 				bblocks, loop->num_nodes, bb);
826   for (i = 0; i < nblocks; i++)
827     FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
828       {
829 	if (e->src == loop->header)
830 	  {
831 	    free (bblocks);
832 	    return DOMST_NONDOMINATING;
833 	  }
834 	if (e->src == bb)
835 	  bb_reachable = true;
836       }
837 
838   free (bblocks);
839   return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
840 }
841 
842 /* Return true if BB is part of the new pre-header that is created
843    when threading the latch to DATA.  */
844 
845 static bool
846 def_split_header_continue_p (const_basic_block bb, const void *data)
847 {
848   const_basic_block new_header = (const_basic_block) data;
849   const struct loop *l;
850 
851   if (bb == new_header
852       || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
853     return false;
854   for (l = bb->loop_father; l; l = loop_outer (l))
855     if (l == new_header->loop_father)
856       return true;
857   return false;
858 }
859 
860 /* Thread jumps through the header of LOOP.  Returns true if cfg changes.
861    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
862    to the inside of the loop.  */
863 
864 static bool
865 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
866 {
867   basic_block header = loop->header;
868   edge e, tgt_edge, latch = loop_latch_edge (loop);
869   edge_iterator ei;
870   basic_block tgt_bb, atgt_bb;
871   enum bb_dom_status domst;
872 
873   /* We have already threaded through headers to exits, so all the threading
874      requests now are to the inside of the loop.  We need to avoid creating
875      irreducible regions (i.e., loops with more than one entry block), and
876      also loop with several latch edges, or new subloops of the loop (although
877      there are cases where it might be appropriate, it is difficult to decide,
878      and doing it wrongly may confuse other optimizers).
879 
880      We could handle more general cases here.  However, the intention is to
881      preserve some information about the loop, which is impossible if its
882      structure changes significantly, in a way that is not well understood.
883      Thus we only handle few important special cases, in which also updating
884      of the loop-carried information should be feasible:
885 
886      1) Propagation of latch edge to a block that dominates the latch block
887 	of a loop.  This aims to handle the following idiom:
888 
889 	first = 1;
890 	while (1)
891 	  {
892 	    if (first)
893 	      initialize;
894 	    first = 0;
895 	    body;
896 	  }
897 
898 	After threading the latch edge, this becomes
899 
900 	first = 1;
901 	if (first)
902 	  initialize;
903 	while (1)
904 	  {
905 	    first = 0;
906 	    body;
907 	  }
908 
909 	The original header of the loop is moved out of it, and we may thread
910 	the remaining edges through it without further constraints.
911 
912      2) All entry edges are propagated to a single basic block that dominates
913 	the latch block of the loop.  This aims to handle the following idiom
914 	(normally created for "for" loops):
915 
916 	i = 0;
917 	while (1)
918 	  {
919 	    if (i >= 100)
920 	      break;
921 	    body;
922 	    i++;
923 	  }
924 
925 	This becomes
926 
927 	i = 0;
928 	while (1)
929 	  {
930 	    body;
931 	    i++;
932 	    if (i >= 100)
933 	      break;
934 	  }
935      */
936 
937   /* Threading through the header won't improve the code if the header has just
938      one successor.  */
939   if (single_succ_p (header))
940     goto fail;
941 
942   if (latch->aux)
943     {
944       if (THREAD_TARGET2 (latch))
945 	goto fail;
946       tgt_edge = THREAD_TARGET (latch);
947       tgt_bb = tgt_edge->dest;
948     }
949   else if (!may_peel_loop_headers
950 	   && !redirection_block_p (loop->header))
951     goto fail;
952   else
953     {
954       tgt_bb = NULL;
955       tgt_edge = NULL;
956       FOR_EACH_EDGE (e, ei, header->preds)
957 	{
958 	  if (!e->aux)
959 	    {
960 	      if (e == latch)
961 		continue;
962 
963 	      /* If latch is not threaded, and there is a header
964 		 edge that is not threaded, we would create loop
965 		 with multiple entries.  */
966 	      goto fail;
967 	    }
968 
969 	  if (THREAD_TARGET2 (e))
970 	    goto fail;
971 	  tgt_edge = THREAD_TARGET (e);
972 	  atgt_bb = tgt_edge->dest;
973 	  if (!tgt_bb)
974 	    tgt_bb = atgt_bb;
975 	  /* Two targets of threading would make us create loop
976 	     with multiple entries.  */
977 	  else if (tgt_bb != atgt_bb)
978 	    goto fail;
979 	}
980 
981       if (!tgt_bb)
982 	{
983 	  /* There are no threading requests.  */
984 	  return false;
985 	}
986 
987       /* Redirecting to empty loop latch is useless.  */
988       if (tgt_bb == loop->latch
989 	  && empty_block_p (loop->latch))
990 	goto fail;
991     }
992 
993   /* The target block must dominate the loop latch, otherwise we would be
994      creating a subloop.  */
995   domst = determine_bb_domination_status (loop, tgt_bb);
996   if (domst == DOMST_NONDOMINATING)
997     goto fail;
998   if (domst == DOMST_LOOP_BROKEN)
999     {
1000       /* If the loop ceased to exist, mark it as such, and thread through its
1001 	 original header.  */
1002       loop->header = NULL;
1003       loop->latch = NULL;
1004       loops_state_set (LOOPS_NEED_FIXUP);
1005       return thread_block (header, false);
1006     }
1007 
1008   if (tgt_bb->loop_father->header == tgt_bb)
1009     {
1010       /* If the target of the threading is a header of a subloop, we need
1011 	 to create a preheader for it, so that the headers of the two loops
1012 	 do not merge.  */
1013       if (EDGE_COUNT (tgt_bb->preds) > 2)
1014 	{
1015 	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1016 	  gcc_assert (tgt_bb != NULL);
1017 	}
1018       else
1019 	tgt_bb = split_edge (tgt_edge);
1020     }
1021 
1022   if (latch->aux)
1023     {
1024       basic_block *bblocks;
1025       unsigned nblocks, i;
1026 
1027       /* First handle the case latch edge is redirected.  We are copying
1028          the loop header but not creating a multiple entry loop.  Make the
1029 	 cfg manipulation code aware of that fact.  */
1030       set_loop_copy (loop, loop);
1031       loop->latch = thread_single_edge (latch);
1032       set_loop_copy (loop, NULL);
1033       gcc_assert (single_succ (loop->latch) == tgt_bb);
1034       loop->header = tgt_bb;
1035 
1036       /* Remove the new pre-header blocks from our loop.  */
1037       bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1038       nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1039 				    bblocks, loop->num_nodes, tgt_bb);
1040       for (i = 0; i < nblocks; i++)
1041 	if (bblocks[i]->loop_father == loop)
1042 	  {
1043 	    remove_bb_from_loops (bblocks[i]);
1044 	    add_bb_to_loop (bblocks[i], loop_outer (loop));
1045 	  }
1046       free (bblocks);
1047 
1048       /* If the new header has multiple latches mark it so.  */
1049       FOR_EACH_EDGE (e, ei, loop->header->preds)
1050 	if (e->src->loop_father == loop
1051 	    && e->src != loop->latch)
1052 	  {
1053 	    loop->latch = NULL;
1054 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1055 	  }
1056 
1057       /* Cancel remaining threading requests that would make the
1058 	 loop a multiple entry loop.  */
1059       FOR_EACH_EDGE (e, ei, header->preds)
1060 	{
1061 	  edge e2;
1062 
1063 	  if (e->aux == NULL)
1064 	    continue;
1065 
1066 	  if (THREAD_TARGET2 (e))
1067 	    e2 = THREAD_TARGET2 (e);
1068 	  else
1069 	    e2 = THREAD_TARGET (e);
1070 
1071 	  if (e->src->loop_father != e2->dest->loop_father
1072 	      && e2->dest != loop->header)
1073 	    {
1074 	      free (e->aux);
1075 	      e->aux = NULL;
1076 	    }
1077 	}
1078 
1079       /* Thread the remaining edges through the former header.  */
1080       thread_block (header, false);
1081     }
1082   else
1083     {
1084       basic_block new_preheader;
1085 
1086       /* Now consider the case entry edges are redirected to the new entry
1087 	 block.  Remember one entry edge, so that we can find the new
1088 	 preheader (its destination after threading).  */
1089       FOR_EACH_EDGE (e, ei, header->preds)
1090 	{
1091 	  if (e->aux)
1092 	    break;
1093 	}
1094 
1095       /* The duplicate of the header is the new preheader of the loop.  Ensure
1096 	 that it is placed correctly in the loop hierarchy.  */
1097       set_loop_copy (loop, loop_outer (loop));
1098 
1099       thread_block (header, false);
1100       set_loop_copy (loop, NULL);
1101       new_preheader = e->dest;
1102 
1103       /* Create the new latch block.  This is always necessary, as the latch
1104 	 must have only a single successor, but the original header had at
1105 	 least two successors.  */
1106       loop->latch = NULL;
1107       mfb_kj_edge = single_succ_edge (new_preheader);
1108       loop->header = mfb_kj_edge->dest;
1109       latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1110       loop->header = latch->dest;
1111       loop->latch = latch->src;
1112     }
1113 
1114   return true;
1115 
1116 fail:
1117   /* We failed to thread anything.  Cancel the requests.  */
1118   FOR_EACH_EDGE (e, ei, header->preds)
1119     {
1120       free (e->aux);
1121       e->aux = NULL;
1122     }
1123   return false;
1124 }
1125 
1126 /* Walk through the registered jump threads and convert them into a
1127    form convenient for this pass.
1128 
1129    Any block which has incoming edges threaded to outgoing edges
1130    will have its entry in THREADED_BLOCK set.
1131 
1132    Any threaded edge will have its new outgoing edge stored in the
1133    original edge's AUX field.
1134 
1135    This form avoids the need to walk all the edges in the CFG to
1136    discover blocks which need processing and avoids unnecessary
1137    hash table lookups to map from threaded edge to new target.  */
1138 
1139 static void
1140 mark_threaded_blocks (bitmap threaded_blocks)
1141 {
1142   unsigned int i;
1143   bitmap_iterator bi;
1144   bitmap tmp = BITMAP_ALLOC (NULL);
1145   basic_block bb;
1146   edge e;
1147   edge_iterator ei;
1148 
1149   for (i = 0; i < threaded_edges.length (); i += 3)
1150     {
1151       edge e = threaded_edges[i];
1152       edge *x = XNEWVEC (edge, 2);
1153 
1154       e->aux = x;
1155       THREAD_TARGET (e) = threaded_edges[i + 1];
1156       THREAD_TARGET2 (e) = threaded_edges[i + 2];
1157       bitmap_set_bit (tmp, e->dest->index);
1158     }
1159 
1160   /* If optimizing for size, only thread through block if we don't have
1161      to duplicate it or it's an otherwise empty redirection block.  */
1162   if (optimize_function_for_size_p (cfun))
1163     {
1164       EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1165 	{
1166 	  bb = BASIC_BLOCK (i);
1167 	  if (EDGE_COUNT (bb->preds) > 1
1168 	      && !redirection_block_p (bb))
1169 	    {
1170 	      FOR_EACH_EDGE (e, ei, bb->preds)
1171 		{
1172 		  free (e->aux);
1173 		  e->aux = NULL;
1174 		}
1175 	    }
1176 	  else
1177 	    bitmap_set_bit (threaded_blocks, i);
1178 	}
1179     }
1180   else
1181     bitmap_copy (threaded_blocks, tmp);
1182 
1183   BITMAP_FREE(tmp);
1184 }
1185 
1186 
1187 /* Walk through all blocks and thread incoming edges to the appropriate
1188    outgoing edge for each edge pair recorded in THREADED_EDGES.
1189 
1190    It is the caller's responsibility to fix the dominance information
1191    and rewrite duplicated SSA_NAMEs back into SSA form.
1192 
1193    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1194    loop headers if it does not simplify the loop.
1195 
1196    Returns true if one or more edges were threaded, false otherwise.  */
1197 
1198 bool
1199 thread_through_all_blocks (bool may_peel_loop_headers)
1200 {
1201   bool retval = false;
1202   unsigned int i;
1203   bitmap_iterator bi;
1204   bitmap threaded_blocks;
1205   struct loop *loop;
1206   loop_iterator li;
1207 
1208   /* We must know about loops in order to preserve them.  */
1209   gcc_assert (current_loops != NULL);
1210 
1211   if (!threaded_edges.exists ())
1212     return false;
1213 
1214   threaded_blocks = BITMAP_ALLOC (NULL);
1215   memset (&thread_stats, 0, sizeof (thread_stats));
1216 
1217   mark_threaded_blocks (threaded_blocks);
1218 
1219   initialize_original_copy_tables ();
1220 
1221   /* First perform the threading requests that do not affect
1222      loop structure.  */
1223   EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1224     {
1225       basic_block bb = BASIC_BLOCK (i);
1226 
1227       if (EDGE_COUNT (bb->preds) > 0)
1228 	retval |= thread_block (bb, true);
1229     }
1230 
1231   /* Then perform the threading through loop headers.  We start with the
1232      innermost loop, so that the changes in cfg we perform won't affect
1233      further threading.  */
1234   FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1235     {
1236       if (!loop->header
1237 	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
1238 	continue;
1239 
1240       retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1241     }
1242 
1243   statistics_counter_event (cfun, "Jumps threaded",
1244 			    thread_stats.num_threaded_edges);
1245 
1246   free_original_copy_tables ();
1247 
1248   BITMAP_FREE (threaded_blocks);
1249   threaded_blocks = NULL;
1250   threaded_edges.release ();
1251 
1252   if (retval)
1253     loops_state_set (LOOPS_NEED_FIXUP);
1254 
1255   return retval;
1256 }
1257 
1258 /* Register a jump threading opportunity.  We queue up all the jump
1259    threading opportunities discovered by a pass and update the CFG
1260    and SSA form all at once.
1261 
1262    E is the edge we can thread, E2 is the new target edge, i.e., we
1263    are effectively recording that E->dest can be changed to E2->dest
1264    after fixing the SSA graph.  */
1265 
1266 void
1267 register_jump_thread (edge e, edge e2, edge e3)
1268 {
1269   /* This can occur if we're jumping to a constant address or
1270      or something similar.  Just get out now.  */
1271   if (e2 == NULL)
1272     return;
1273 
1274   if (!threaded_edges.exists ())
1275     threaded_edges.create (15);
1276 
1277   if (dump_file && (dump_flags & TDF_DETAILS)
1278       && e->dest != e2->src)
1279     fprintf (dump_file,
1280 	     "  Registering jump thread around one or more intermediate blocks\n");
1281 
1282   threaded_edges.safe_push (e);
1283   threaded_edges.safe_push (e2);
1284   threaded_edges.safe_push (e3);
1285 }
1286