xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-threadedge.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /* SSA Jump Threading
2    Copyright (C) 2005-2017 Free Software Foundation, Inc.
3    Contributed by Jeff Law  <law@redhat.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "predict.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfgloop.h"
31 #include "gimple-iterator.h"
32 #include "tree-cfg.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "params.h"
35 #include "tree-ssa-scopedtables.h"
36 #include "tree-ssa-threadedge.h"
37 #include "tree-ssa-dom.h"
38 #include "gimple-fold.h"
39 #include "cfganal.h"
40 
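/* As a rough illustration of the transformation this file enables
   (schematic GIMPLE, not taken from any particular test case):

       if (a_1 > 10)
         goto bb_2;        // bb_2 sets x_2 = 1
       else
         goto bb_3;        // bb_3 sets x_3 = 0
       ...
       # x_4 = PHI <x_2(bb_2), x_3(bb_3)>
       if (x_4 != 0)       // decidable along each incoming edge
         ...

   Along the edge from bb_2 we know x_4 == 1 and along the edge from bb_3
   we know x_4 == 0, so each predecessor can jump straight to its final
   destination.  This file discovers and registers such opportunities; the
   actual CFG and SSA updating is done in tree-ssa-threadupdate.c.  */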
41 /* To avoid code explosion due to jump threading, we limit the
42    number of statements we are going to copy.  This variable
43    holds the number of statements currently seen that we'll have
44    to copy as part of the jump threading process.  */
45 static int stmt_count;
46 
47 /* Array to record value-handles per SSA_NAME.  */
48 vec<tree> ssa_name_values;
49 
50 typedef tree (pfn_simplify) (gimple *, gimple *,
51 			     class avail_exprs_stack *,
52 			     basic_block);
53 
54 /* Set the value for the SSA name NAME to VALUE.  */
55 
56 void
57 set_ssa_name_value (tree name, tree value)
58 {
59   if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
60     ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
61   if (value && TREE_OVERFLOW_P (value))
62     value = drop_tree_overflow (value);
63   ssa_name_values[SSA_NAME_VERSION (name)] = value;
64 }
65 
66 /* Initialize the per SSA_NAME value-handles array.  */
67 void
68 threadedge_initialize_values (void)
69 {
70   gcc_assert (!ssa_name_values.exists ());
71   ssa_name_values.create (num_ssa_names);
72 }
73 
74 /* Free the per SSA_NAME value-handle array.  */
75 void
76 threadedge_finalize_values (void)
77 {
78   ssa_name_values.release ();
79 }
80 
81 /* Return TRUE if we may be able to thread an incoming edge into
82    BB to an outgoing edge from BB.  Return FALSE otherwise.  */
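/* For example (schematic GIMPLE, purely illustrative), a block such as

       <bb 4>:                ;; two or more predecessors and successors
         x_3 = PHI <0(2), x_2(3)>
         if (x_3 != 0)
           goto <bb 5>;
         else
           goto <bb 6>;

   is potentially threadable: the edge used to enter bb 4 may determine
   x_3 and therefore which outgoing edge will be taken.  */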
83 
84 bool
85 potentially_threadable_block (basic_block bb)
86 {
87   gimple_stmt_iterator gsi;
88 
89   /* Special case.  We can get blocks that are forwarders, but are
90      not optimized away because they forward from outside a loop
91      to the loop header.   We want to thread through them as we can
92      sometimes thread to the loop exit, which is obviously profitable.
93      The interesting case here is when the block has PHIs.  */
94   if (gsi_end_p (gsi_start_nondebug_bb (bb))
95       && !gsi_end_p (gsi_start_phis (bb)))
96     return true;
97 
98   /* If BB has a single successor or a single predecessor, then
99      there is no threading opportunity.  */
100   if (single_succ_p (bb) || single_pred_p (bb))
101     return false;
102 
103   /* If BB does not end with a conditional, switch or computed goto,
104      then there is no threading opportunity.  */
105   gsi = gsi_last_bb (bb);
106   if (gsi_end_p (gsi)
107       || ! gsi_stmt (gsi)
108       || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
109 	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
110 	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
111     return false;
112 
113   return true;
114 }
115 
116 /* Record temporary equivalences created by PHIs at the target of the
117    edge E.  Record unwind information for the equivalences into
118    CONST_AND_COPIES.
119 
120    If a PHI which prevents threading is encountered, then return FALSE
121    indicating we should not thread this edge, else return TRUE.
122 
123    Any equivalences recorded are context sensitive: they are valid only
124    while traversing E and must be unwound afterwards.  */
125 
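/* As a hypothetical illustration, when traversing the edge (2) -> (4)
   through

       <bb 4>:
         x_5 = PHI <0(2), x_3(3)>

   we record the temporary equivalence x_5 == 0; traversing (3) -> (4)
   would instead record x_5 == x_3.  Such equivalences hold only while
   this particular edge is being processed.  */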
126 static bool
127 record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
128 {
129   gphi_iterator gsi;
130 
131   /* Each PHI creates a temporary equivalence, record them.
132      These are context sensitive equivalences and will be removed
133      later.  */
134   for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
135     {
136       gphi *phi = gsi.phi ();
137       tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
138       tree dst = gimple_phi_result (phi);
139 
140       /* If the desired argument is not the same as this PHI's result
141 	 and it is set by a PHI in E->dest, then we can not thread
142 	 through E->dest.  */
143       if (src != dst
144 	  && TREE_CODE (src) == SSA_NAME
145 	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
146 	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
147 	return false;
148 
149       /* We consider any non-virtual PHI as a statement since it
150 	 could result in a constant assignment or copy operation.  */
151       if (!virtual_operand_p (dst))
152 	stmt_count++;
153 
154       const_and_copies->record_const_or_copy (dst, src);
155     }
156   return true;
157 }
158 
159 /* Valueize hook for gimple_fold_stmt_to_constant_1.  */
160 
161 static tree
162 threadedge_valueize (tree t)
163 {
164   if (TREE_CODE (t) == SSA_NAME)
165     {
166       tree tem = SSA_NAME_VALUE (t);
167       if (tem)
168 	return tem;
169     }
170   return t;
171 }
172 
173 /* Try to simplify each statement in E->dest, ultimately leading to
174    a simplification of the COND_EXPR at the end of E->dest.
175 
176    Record unwind information for temporary equivalences into CONST_AND_COPIES.
177 
178    Use SIMPLIFY (a pointer to a callback function) to further simplify
179    statements using pass specific information.
180 
181    We might consider marking just those statements which ultimately
182    feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
183    would be recovered by trying to simplify fewer statements.
184 
185    If we are able to simplify a statement into the form
186    SSA_NAME = (SSA_NAME | gimple invariant), then we can record
187    a context sensitive equivalence which may help us simplify
188    later statements in E->dest.  */
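/* As a hypothetical illustration, suppose traversing E already gave us
   the temporary equivalence x_5 == 0 from a PHI.  For a statement in
   E->dest such as

       y_7 = x_5 + 9;

   the folding below yields 9, so we record the context sensitive
   equivalence y_7 == 9, which may later let us decide a condition such
   as "if (y_7 != 9)" at the end of the block.  */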
189 
190 static gimple *
191 record_temporary_equivalences_from_stmts_at_dest (edge e,
192     const_and_copies *const_and_copies,
193     avail_exprs_stack *avail_exprs_stack,
194     pfn_simplify simplify)
195 {
196   gimple *stmt = NULL;
197   gimple_stmt_iterator gsi;
198   int max_stmt_count;
199 
200   max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
201 
202   /* Walk through each statement in the block recording equivalences
203      we discover.  Note any equivalences we discover are context
204      sensitive (ie, are dependent on traversing E) and must be unwound
205      when we're finished processing E.  */
206   for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
207     {
208       tree cached_lhs = NULL;
209 
210       stmt = gsi_stmt (gsi);
211 
212       /* Ignore empty statements and labels.  */
213       if (gimple_code (stmt) == GIMPLE_NOP
214 	  || gimple_code (stmt) == GIMPLE_LABEL
215 	  || is_gimple_debug (stmt))
216 	continue;
217 
218       /* If the statement has volatile operands, then we assume we
219 	 can not thread through this block.  This is overly
220 	 conservative in some ways.  */
221       if (gimple_code (stmt) == GIMPLE_ASM
222 	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
223 	return NULL;
224 
225       /* If the statement is a call to an internal function that must
226 	 remain unique, then we can not thread through here.  */
227       if (gimple_code (stmt) == GIMPLE_CALL
228 	  && gimple_call_internal_p (stmt)
229 	  && gimple_call_internal_unique_p (stmt))
230 	return NULL;
231 
232       /* If duplicating this block is going to cause too much code
233 	 expansion, then do not thread through this block.  */
234       stmt_count++;
235       if (stmt_count > max_stmt_count)
236 	return NULL;
237 
238       /* If this is not a statement that sets an SSA_NAME to a new
239 	 value, then do not try to simplify this statement as it will
240 	 not simplify in any way that is helpful for jump threading.  */
241       if ((gimple_code (stmt) != GIMPLE_ASSIGN
242            || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
243           && (gimple_code (stmt) != GIMPLE_CALL
244               || gimple_call_lhs (stmt) == NULL_TREE
245               || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
246 	continue;
247 
248       /* The result of __builtin_object_size depends on all the arguments
249 	 of a phi node. Temporarily using only one edge produces invalid
250 	 results. For example
251 
252 	 if (x < 6)
253 	   goto l;
254 	 else
255 	   goto l;
256 
257 	 l:
258 	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
259 	 __builtin_object_size (r, 0)
260 
261 	 The result of __builtin_object_size is defined to be the maximum of
262 	 remaining bytes. If we use only one edge on the phi, the result will
263 	 change to be the remaining bytes for the corresponding phi argument.
264 
265 	 Similarly for __builtin_constant_p:
266 
267 	 r = PHI <1(2), 2(3)>
268 	 __builtin_constant_p (r)
269 
270 	 Both PHI arguments are constant, but x ? 1 : 2 is still not
271 	 constant.  */
272 
273       if (is_gimple_call (stmt))
274 	{
275 	  tree fndecl = gimple_call_fndecl (stmt);
276 	  if (fndecl
277 	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
278 		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
279 	    continue;
280 	}
281 
282       /* At this point we have a statement which assigns an RHS to an
283 	 SSA_NAME on the LHS.  We want to try and simplify this statement
284 	 to expose more context sensitive equivalences which in turn may
285 	 allow us to simplify the condition at the end of the block.
286 
287 	 Handle simple copy operations as well as implied copies from
288 	 ASSERT_EXPRs.  */
289       if (gimple_assign_single_p (stmt)
290           && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
291 	cached_lhs = gimple_assign_rhs1 (stmt);
292       else if (gimple_assign_single_p (stmt)
293                && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
294 	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
295       else
296 	{
297 	  /* A statement that is not a trivial copy or ASSERT_EXPR.
298 	     Try to fold the new expression.  Inserting the
299 	     expression into the hash table is unlikely to help.  */
300 	  /* ???  The DOM callback below can be changed to setting
301 	     the mprts_hook around the call to thread_across_edge,
302 	     avoiding the use substitution.  The VRP hook should be
303 	     changed to properly valueize operands itself using
304 	     SSA_NAME_VALUE in addition to its own lattice.  */
305 	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
306 						       threadedge_valueize);
307           if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
308 	      && (!cached_lhs
309                   || (TREE_CODE (cached_lhs) != SSA_NAME
310                       && !is_gimple_min_invariant (cached_lhs))))
311 	    {
312 	      /* We're going to temporarily copy propagate the operands
313 		 and see if that allows us to simplify this statement.  */
314 	      tree *copy;
315 	      ssa_op_iter iter;
316 	      use_operand_p use_p;
317 	      unsigned int num, i = 0;
318 
319 	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
320 	      copy = XALLOCAVEC (tree, num);
321 
322 	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
323 		 the operands.  */
324 	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
325 		{
326 		  tree tmp = NULL;
327 		  tree use = USE_FROM_PTR (use_p);
328 
329 		  copy[i++] = use;
330 		  if (TREE_CODE (use) == SSA_NAME)
331 		    tmp = SSA_NAME_VALUE (use);
332 		  if (tmp)
333 		    SET_USE (use_p, tmp);
334 		}
335 
336 	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);
337 
338 	      /* Restore the statement's original uses/defs.  */
339 	      i = 0;
340 	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
341 		SET_USE (use_p, copy[i++]);
342 	    }
343 	}
344 
345       /* Record the context sensitive equivalence if we were able
346 	 to simplify this statement.  */
347       if (cached_lhs
348 	  && (TREE_CODE (cached_lhs) == SSA_NAME
349 	      || is_gimple_min_invariant (cached_lhs)))
350 	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
351 						cached_lhs);
352     }
353   return stmt;
354 }
355 
356 static tree simplify_control_stmt_condition_1 (edge, gimple *,
357 					       class avail_exprs_stack *,
358 					       tree, enum tree_code, tree,
359 					       gcond *, pfn_simplify,
360 					       unsigned);
361 
362 /* Simplify the control statement at the end of the block E->dest.
363 
364    To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
365    is available to use/clobber in DUMMY_COND.
366 
367    Use SIMPLIFY (a pointer to a callback function) to further simplify
368    a condition using pass specific information.
369 
370    Return the simplified condition or NULL if simplification could
371    not be performed.  When simplifying a GIMPLE_SWITCH, we may return
372    the CASE_LABEL_EXPR that will be taken.
373 
374    The available expression table is referenced via AVAIL_EXPRS_STACK.  */
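/* For instance (hypothetical), if E->dest ends in "if (x_5 != 0)" and the
   temporary equivalences give SSA_NAME_VALUE (x_5) == 0, the helpers below
   reduce the condition to boolean_false_node.  For a GIMPLE_SWITCH whose
   index the pass specific callback can resolve to a single case, the
   result is the CASE_LABEL_EXPR of the label proven to be taken.  */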
375 
376 static tree
377 simplify_control_stmt_condition (edge e,
378 				 gimple *stmt,
379 				 class avail_exprs_stack *avail_exprs_stack,
380 				 gcond *dummy_cond,
381 				 pfn_simplify simplify)
382 {
383   tree cond, cached_lhs;
384   enum gimple_code code = gimple_code (stmt);
385 
386   /* For comparisons, we have to update both operands, then try
387      to simplify the comparison.  */
388   if (code == GIMPLE_COND)
389     {
390       tree op0, op1;
391       enum tree_code cond_code;
392 
393       op0 = gimple_cond_lhs (stmt);
394       op1 = gimple_cond_rhs (stmt);
395       cond_code = gimple_cond_code (stmt);
396 
397       /* Get the current value of both operands.  */
398       if (TREE_CODE (op0) == SSA_NAME)
399 	{
400 	  for (int i = 0; i < 2; i++)
401 	    {
402 	      if (TREE_CODE (op0) == SSA_NAME
403 		  && SSA_NAME_VALUE (op0))
404 		op0 = SSA_NAME_VALUE (op0);
405 	      else
406 		break;
407 	    }
408 	}
409 
410       if (TREE_CODE (op1) == SSA_NAME)
411 	{
412 	  for (int i = 0; i < 2; i++)
413 	    {
414 	      if (TREE_CODE (op1) == SSA_NAME
415 		  && SSA_NAME_VALUE (op1))
416 		op1 = SSA_NAME_VALUE (op1);
417 	      else
418 		break;
419 	    }
420 	}
421 
422       const unsigned recursion_limit = 4;
423 
424       cached_lhs
425 	= simplify_control_stmt_condition_1 (e, stmt, avail_exprs_stack,
426 					     op0, cond_code, op1,
427 					     dummy_cond, simplify,
428 					     recursion_limit);
429 
430       /* If we were testing an integer/pointer against a constant, then
431 	 we can use the FSM code to trace the value of the SSA_NAME.  If
432 	 a value is found, then the condition will collapse to a constant.
433 
434 	 Return the SSA_NAME we want to trace back rather than the full
435 	 expression and give the FSM threader a chance to find its value.  */
436       if (cached_lhs == NULL)
437 	{
438 	  /* Recover the original operands.  They may have been simplified
439 	     using context sensitive equivalences.  Those context sensitive
440 	     equivalences may not be valid on paths found by the FSM optimizer.  */
441 	  tree op0 = gimple_cond_lhs (stmt);
442 	  tree op1 = gimple_cond_rhs (stmt);
443 
444 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
445 	       || POINTER_TYPE_P (TREE_TYPE (op0)))
446 	      && TREE_CODE (op0) == SSA_NAME
447 	      && TREE_CODE (op1) == INTEGER_CST)
448 	    return op0;
449 	}
450 
451       return cached_lhs;
452     }
453 
454   if (code == GIMPLE_SWITCH)
455     cond = gimple_switch_index (as_a <gswitch *> (stmt));
456   else if (code == GIMPLE_GOTO)
457     cond = gimple_goto_dest (stmt);
458   else
459     gcc_unreachable ();
460 
461   /* We can have conditionals which just test the state of a variable
462      rather than use a relational operator.  These are simpler to handle.  */
463   if (TREE_CODE (cond) == SSA_NAME)
464     {
465       tree original_lhs = cond;
466       cached_lhs = cond;
467 
468       /* Get the variable's current value from the equivalence chains.
469 
470 	 It is possible to get loops in the SSA_NAME_VALUE chains
471 	 (consider threading the backedge of a loop where we have
472 	 a loop invariant SSA_NAME used in the condition).  */
473       if (cached_lhs)
474 	{
475 	  for (int i = 0; i < 2; i++)
476 	    {
477 	      if (TREE_CODE (cached_lhs) == SSA_NAME
478 		  && SSA_NAME_VALUE (cached_lhs))
479 		cached_lhs = SSA_NAME_VALUE (cached_lhs);
480 	      else
481 		break;
482 	    }
483 	}
484 
485       /* If we haven't simplified to an invariant yet, then use the
486 	 pass specific callback to try and simplify it further.  */
487       if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
488 	{
489 	  if (code == GIMPLE_SWITCH)
490 	    {
491 	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
492 		 we found before handing off to VRP.  If simplification is
493 	         possible, the simplified value will be a CASE_LABEL_EXPR of
494 		 the label that is proven to be taken.  */
495 	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
496 	      gimple_switch_set_index (dummy_switch, cached_lhs);
497 	      cached_lhs = (*simplify) (dummy_switch, stmt,
498 					avail_exprs_stack, e->src);
499 	      ggc_free (dummy_switch);
500 	    }
501 	  else
502 	    cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);
503 	}
504 
505       /* We couldn't find an invariant.  But, callers of this
506 	 function may be able to do something useful with the
507 	 unmodified destination.  */
508       if (!cached_lhs)
509 	cached_lhs = original_lhs;
510     }
511   else
512     cached_lhs = NULL;
513 
514   return cached_lhs;
515 }
516 
517 /* Recursive helper for simplify_control_stmt_condition.  */
518 
519 static tree
520 simplify_control_stmt_condition_1 (edge e,
521 				   gimple *stmt,
522 				   class avail_exprs_stack *avail_exprs_stack,
523 				   tree op0,
524 				   enum tree_code cond_code,
525 				   tree op1,
526 				   gcond *dummy_cond,
527 				   pfn_simplify simplify,
528 				   unsigned limit)
529 {
530   if (limit == 0)
531     return NULL_TREE;
532 
533   /* We may need to canonicalize the comparison.  For
534      example, op0 might be a constant while op1 is an
535      SSA_NAME.  Failure to canonicalize will cause us to
536      miss threading opportunities.  */
537   if (tree_swap_operands_p (op0, op1))
538     {
539       cond_code = swap_tree_comparison (cond_code);
540       std::swap (op0, op1);
541     }
542 
543   /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
544      recurse into the LHS to see if there is a dominating ASSERT_EXPR
545      of A or of B that makes this condition always true or always false
546      along the edge E.  */
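  /* As a concrete (hypothetical) instance: given

	 t_4 = a_2 & b_3;
	 if (t_4 != 0)

     we recurse on "a_2 != 0" and on "b_3 != 0"; if either is known to be
     false along E, then (a_2 & b_3) != 0 is known to be false as well.  */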
547   if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
548       && TREE_CODE (op0) == SSA_NAME
549       && integer_zerop (op1))
550     {
551       gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
552       if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
553         ;
554       else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
555 	       || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
556 	{
557 	  enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
558 	  const tree rhs1 = gimple_assign_rhs1 (def_stmt);
559 	  const tree rhs2 = gimple_assign_rhs2 (def_stmt);
560 
561 	  /* Is A != 0 ?  */
562 	  const tree res1
563 	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
564 						 rhs1, NE_EXPR, op1,
565 						 dummy_cond, simplify,
566 						 limit - 1);
567 	  if (res1 == NULL_TREE)
568 	    ;
569 	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
570 	    {
571 	      /* If A == 0 then (A & B) != 0 is always false.  */
572 	      if (cond_code == NE_EXPR)
573 	        return boolean_false_node;
574 	      /* If A == 0 then (A & B) == 0 is always true.  */
575 	      if (cond_code == EQ_EXPR)
576 		return boolean_true_node;
577 	    }
578 	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
579 	    {
580 	      /* If A != 0 then (A | B) != 0 is always true.  */
581 	      if (cond_code == NE_EXPR)
582 		return boolean_true_node;
583 	      /* If A != 0 then (A | B) == 0 is always false.  */
584 	      if (cond_code == EQ_EXPR)
585 		return boolean_false_node;
586 	    }
587 
588 	  /* Is B != 0 ?  */
589 	  const tree res2
590 	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
591 						 rhs2, NE_EXPR, op1,
592 						 dummy_cond, simplify,
593 						 limit - 1);
594 	  if (res2 == NULL_TREE)
595 	    ;
596 	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
597 	    {
598 	      /* If B == 0 then (A & B) != 0 is always false.  */
599 	      if (cond_code == NE_EXPR)
600 	        return boolean_false_node;
601 	      /* If B == 0 then (A & B) == 0 is always true.  */
602 	      if (cond_code == EQ_EXPR)
603 		return boolean_true_node;
604 	    }
605 	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
606 	    {
607 	      /* If B != 0 then (A | B) != 0 is always true.  */
608 	      if (cond_code == NE_EXPR)
609 		return boolean_true_node;
610 	      /* If B != 0 then (A | B) == 0 is always false.  */
611 	      if (cond_code == EQ_EXPR)
612 		return boolean_false_node;
613 	    }
614 
615 	  if (res1 != NULL_TREE && res2 != NULL_TREE)
616 	    {
617 	      if (rhs_code == BIT_AND_EXPR
618 		  && TYPE_PRECISION (TREE_TYPE (op0)) == 1
619 		  && integer_nonzerop (res1)
620 		  && integer_nonzerop (res2))
621 		{
622 		  /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true.  */
623 		  if (cond_code == NE_EXPR)
624 		    return boolean_true_node;
625 		  /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false.  */
626 		  if (cond_code == EQ_EXPR)
627 		    return boolean_false_node;
628 		}
629 
630 	      if (rhs_code == BIT_IOR_EXPR
631 		  && integer_zerop (res1)
632 		  && integer_zerop (res2))
633 		{
634 		  /* If A == 0 and B == 0 then (A | B) != 0 is false.  */
635 		  if (cond_code == NE_EXPR)
636 		    return boolean_false_node;
637 		  /* If A == 0 and B == 0 then (A | B) == 0 is true.  */
638 		  if (cond_code == EQ_EXPR)
639 		    return boolean_true_node;
640 		}
641 	    }
642 	}
643       /* Handle (A CMP B) CMP 0.  */
644       else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
645 	       == tcc_comparison)
646 	{
647 	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
648 	  tree rhs2 = gimple_assign_rhs2 (def_stmt);
649 
650 	  tree_code new_cond = gimple_assign_rhs_code (def_stmt);
651 	  if (cond_code == EQ_EXPR)
652 	    new_cond = invert_tree_comparison (new_cond, false);
653 
654 	  tree res
655 	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
656 						 rhs1, new_cond, rhs2,
657 						 dummy_cond, simplify,
658 						 limit - 1);
659 	  if (res != NULL_TREE && is_gimple_min_invariant (res))
660 	    return res;
661 	}
662     }
663 
664   gimple_cond_set_code (dummy_cond, cond_code);
665   gimple_cond_set_lhs (dummy_cond, op0);
666   gimple_cond_set_rhs (dummy_cond, op1);
667 
668   /* We absolutely do not care about any type conversions;
669      we only care about a zero/nonzero value.  */
670   fold_defer_overflow_warnings ();
671 
672   tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
673   if (res)
674     while (CONVERT_EXPR_P (res))
675       res = TREE_OPERAND (res, 0);
676 
677   fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
678 				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
679 
680   /* If we have not simplified the condition down to an invariant,
681      then use the pass specific callback to simplify the condition.  */
682   if (!res
683       || !is_gimple_min_invariant (res))
684     res = (*simplify) (dummy_cond, stmt, avail_exprs_stack, e->src);
685 
686   return res;
687 }
688 
689 /* Copy debug stmts from DEST's chain of single predecessors up to
690    SRC, so that we don't lose the bindings as PHI nodes are introduced
691    when DEST gains new predecessors.  */
692 void
693 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
694 {
695   if (!MAY_HAVE_DEBUG_STMTS)
696     return;
697 
698   if (!single_pred_p (dest))
699     return;
700 
701   gcc_checking_assert (dest != src);
702 
703   gimple_stmt_iterator gsi = gsi_after_labels (dest);
704   int i = 0;
705   const int alloc_count = 16; // ?? Should this be a PARAM?
706 
707   /* Estimate the number of debug vars overridden in the beginning of
708      DEST, to tell how many we're going to need to begin with.  */
709   for (gimple_stmt_iterator si = gsi;
710        i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
711     {
712       gimple *stmt = gsi_stmt (si);
713       if (!is_gimple_debug (stmt))
714 	break;
715       i++;
716     }
717 
718   auto_vec<tree, alloc_count> fewvars;
719   hash_set<tree> *vars = NULL;
720 
721   /* If we're already starting with 3/4 of alloc_count, go for a
722      hash_set, otherwise start with an unordered stack-allocated
723      VEC.  */
724   if (i * 4 > alloc_count * 3)
725     vars = new hash_set<tree>;
726 
727   /* Now go through the initial debug stmts in DEST again, this time
728      actually inserting in VARS or FEWVARS.  Don't bother checking for
729      duplicates in FEWVARS.  */
730   for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
731     {
732       gimple *stmt = gsi_stmt (si);
733       if (!is_gimple_debug (stmt))
734 	break;
735 
736       tree var;
737 
738       if (gimple_debug_bind_p (stmt))
739 	var = gimple_debug_bind_get_var (stmt);
740       else if (gimple_debug_source_bind_p (stmt))
741 	var = gimple_debug_source_bind_get_var (stmt);
742       else
743 	gcc_unreachable ();
744 
745       if (vars)
746 	vars->add (var);
747       else
748 	fewvars.quick_push (var);
749     }
750 
751   basic_block bb = dest;
752 
753   do
754     {
755       bb = single_pred (bb);
756       for (gimple_stmt_iterator si = gsi_last_bb (bb);
757 	   !gsi_end_p (si); gsi_prev (&si))
758 	{
759 	  gimple *stmt = gsi_stmt (si);
760 	  if (!is_gimple_debug (stmt))
761 	    continue;
762 
763 	  tree var;
764 
765 	  if (gimple_debug_bind_p (stmt))
766 	    var = gimple_debug_bind_get_var (stmt);
767 	  else if (gimple_debug_source_bind_p (stmt))
768 	    var = gimple_debug_source_bind_get_var (stmt);
769 	  else
770 	    gcc_unreachable ();
771 
772 	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
773 	     copied into a new block that will precede BB, debug bind
774 	     stmts in bypassed BBs may actually be discarded if
775 	     they're overwritten by subsequent debug bind stmts, which
776 	     might be a problem once we introduce stmt frontier notes
777 	     or somesuch.  Adding `&& bb == src' to the condition
778 	     below will preserve all potentially relevant debug
779 	     notes.  */
780 	  if (vars && vars->add (var))
781 	    continue;
782 	  else if (!vars)
783 	    {
784 	      int i = fewvars.length ();
785 	      while (i--)
786 		if (fewvars[i] == var)
787 		  break;
788 	      if (i >= 0)
789 		continue;
790 
791 	      if (fewvars.length () < (unsigned) alloc_count)
792 		fewvars.quick_push (var);
793 	      else
794 		{
795 		  vars = new hash_set<tree>;
796 		  for (i = 0; i < alloc_count; i++)
797 		    vars->add (fewvars[i]);
798 		  fewvars.release ();
799 		  vars->add (var);
800 		}
801 	    }
802 
803 	  stmt = gimple_copy (stmt);
804 	  /* ??? Should we drop the location of the copy to denote
805 	     they're artificial bindings?  */
806 	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
807 	}
808     }
809   while (bb != src && single_pred_p (bb));
810 
811   if (vars)
812     delete vars;
813   else if (fewvars.exists ())
814     fewvars.release ();
815 }
816 
817 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
818    need not be duplicated as part of the CFG/SSA updating process).
819 
820    If it is threadable, add it to PATH and VISITED and recurse, ultimately
821    returning TRUE from the toplevel call.   Otherwise do nothing and
822    return false.
823 
824    DUMMY_COND, SIMPLIFY are used to try and simplify the condition at the
825    end of TAKEN_EDGE->dest.
826 
827    The available expression table is referenced via AVAIL_EXPRS_STACK.  */
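/* Schematically (hypothetical CFG), if TAKEN_EDGE leads into an empty
   forwarding block

       <bb 7>:            ;; no PHIs, no statements
         goto <bb 9>;

   then bb 7's single outgoing edge is pushed onto PATH as
   EDGE_NO_COPY_SRC_BLOCK and the walk continues into bb 9, since bb 7
   never needs to be duplicated.  */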
828 
829 static bool
830 thread_around_empty_blocks (edge taken_edge,
831 			    gcond *dummy_cond,
832 			    class avail_exprs_stack *avail_exprs_stack,
833 			    pfn_simplify simplify,
834 			    bitmap visited,
835 			    vec<jump_thread_edge *> *path)
836 {
837   basic_block bb = taken_edge->dest;
838   gimple_stmt_iterator gsi;
839   gimple *stmt;
840   tree cond;
841 
842   /* The key property of these blocks is that they need not be duplicated
843      when threading.  Thus they can not have visible side effects such
844      as PHI nodes.  */
845   if (!gsi_end_p (gsi_start_phis (bb)))
846     return false;
847 
848   /* Skip over DEBUG statements at the start of the block.  */
849   gsi = gsi_start_nondebug_bb (bb);
850 
851   /* If the block has no statements, but does have a single successor, then
852      it's just a forwarding block and we can thread through it trivially.
853 
854      However, note that just threading through empty blocks with single
855      successors is not inherently profitable.  For the jump thread to
856      be profitable, we must avoid a runtime conditional.
857 
858      By taking the return value from the recursive call, we get the
859      desired effect of returning TRUE when we found a profitable jump
860      threading opportunity and FALSE otherwise.
861 
862      This is particularly important when this routine is called after
863      processing a joiner block.  Returning TRUE too aggressively in
864      that case results in pointless duplication of the joiner block.  */
865   if (gsi_end_p (gsi))
866     {
867       if (single_succ_p (bb))
868 	{
869 	  taken_edge = single_succ_edge (bb);
870 
871 	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
872 	    return false;
873 
874 	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
875 	    {
876 	      jump_thread_edge *x
877 		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
878 	      path->safe_push (x);
879 	      bitmap_set_bit (visited, taken_edge->dest->index);
880 	      return thread_around_empty_blocks (taken_edge,
881 						 dummy_cond,
882 						 avail_exprs_stack,
883 						 simplify,
884 						 visited,
885 						 path);
886 	    }
887 	}
888 
889       /* We have a block with no statements, but multiple successors?  */
890       return false;
891     }
892 
893   /* The only real statement this block can have is a control
894      flow altering statement.  Anything else stops the thread.  */
895   stmt = gsi_stmt (gsi);
896   if (gimple_code (stmt) != GIMPLE_COND
897       && gimple_code (stmt) != GIMPLE_GOTO
898       && gimple_code (stmt) != GIMPLE_SWITCH)
899     return false;
900 
901   /* Extract and simplify the condition.  */
902   cond = simplify_control_stmt_condition (taken_edge, stmt,
903 					  avail_exprs_stack, dummy_cond,
904 					  simplify);
905 
906   /* If the condition can be statically computed and we have not already
907      visited the destination edge, then add the taken edge to our thread
908      path.  */
909   if (cond != NULL_TREE
910       && (is_gimple_min_invariant (cond)
911 	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
912     {
913       if (TREE_CODE (cond) == CASE_LABEL_EXPR)
914 	taken_edge = find_edge (bb, label_to_block (CASE_LABEL (cond)));
915       else
916 	taken_edge = find_taken_edge (bb, cond);
917 
918       if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
919 	return false;
920 
921       if (bitmap_bit_p (visited, taken_edge->dest->index))
922 	return false;
923       bitmap_set_bit (visited, taken_edge->dest->index);
924 
925       jump_thread_edge *x
926 	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
927       path->safe_push (x);
928 
929       thread_around_empty_blocks (taken_edge,
930 				  dummy_cond,
931 				  avail_exprs_stack,
932 				  simplify,
933 				  visited,
934 				  path);
935       return true;
936     }
937 
938   return false;
939 }
940 
941 /* We are exiting E->src, see if E->dest ends with a conditional
942    jump which has a known value when reached via E.
943 
944    E->dest can have arbitrary side effects which, if threading is
945    successful, will be maintained.
946 
947    Special care is necessary if E is a back edge in the CFG as we
948    may have already recorded equivalences for E->dest into our
949    various tables, including the result of the conditional at
950    the end of E->dest.  Threading opportunities are severely
951    limited in that case to avoid short-circuiting the loop
952    incorrectly.
953 
954    DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
955    to avoid allocating memory.
956 
957    STACK is used to undo temporary equivalences created during the walk of
958    E->dest.
959 
960    SIMPLIFY is a pass-specific function used to simplify statements.
961 
962    Our caller is responsible for restoring the state of the expression
963    and const_and_copies stacks.
964 
965    A positive return value indicates success.  Zero indicates failure,
966    but the block can still be duplicated as a joiner in a jump thread
967    path.  A negative return value indicates that the block should not
968    be duplicated and so is unsuitable as a joiner in a threading path.  */
969 
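/* As a sketch of a successful call (hypothetical), starting from edge E
   with an empty PATH, the path is built as

       E                                  EDGE_START_JUMP_THREAD
       known outgoing edge of E->dest     EDGE_COPY_SRC_BLOCK
       zero or more edges appended by     EDGE_NO_COPY_SRC_BLOCK
       thread_around_empty_blocks

   and the function returns 1 so the caller can register the thread.  */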
970 static int
971 thread_through_normal_block (edge e,
972 			     gcond *dummy_cond,
973 			     const_and_copies *const_and_copies,
974 			     avail_exprs_stack *avail_exprs_stack,
975 			     pfn_simplify simplify,
976 			     vec<jump_thread_edge *> *path,
977 			     bitmap visited)
978 {
979   /* We want to record any equivalences created by traversing E.  */
980   record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
981 
982   /* PHIs create temporary equivalences.
983      Note that if we found a PHI that made the block non-threadable, then
984      we need to bubble that up to our caller in the same manner we do
985      when we prematurely stop processing statements below.  */
986   if (!record_temporary_equivalences_from_phis (e, const_and_copies))
987     return -1;
988 
989   /* Now walk each statement recording any context sensitive
990      temporary equivalences we can detect.  */
991   gimple *stmt
992     = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
993 							avail_exprs_stack,
994 							simplify);
995 
996   /* There are two reasons STMT might be null, and distinguishing
997      between them is important.
998 
999      First the block may not have had any statements.  For example, it
1000      might have some PHIs and unconditionally transfer control elsewhere.
1001      Such blocks are suitable for jump threading, particularly as a
1002      joiner block.
1003 
1004      The second reason would be if we did not process all the statements
1005      in the block (because there were too many to make duplicating the
1006      block profitable).  If we did not look at all the statements, then
1007      we may not have invalidated everything needing invalidation.  Thus
1008      we must signal to our caller that this block is not suitable for
1009      use as a joiner in a threading path.  */
1010   if (!stmt)
1011     {
1012       /* First case.  The block simply doesn't have any statements, but
1013 	 does have PHIs.  */
1014       if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
1015 	  && !gsi_end_p (gsi_start_phis (e->dest)))
1016 	return 0;
1017 
1018       /* Second case.  */
1019       return -1;
1020     }
1021 
1022   /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
1023      will be taken.  */
1024   if (gimple_code (stmt) == GIMPLE_COND
1025       || gimple_code (stmt) == GIMPLE_GOTO
1026       || gimple_code (stmt) == GIMPLE_SWITCH)
1027     {
1028       tree cond;
1029 
1030       /* Extract and simplify the condition.  */
1031       cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
1032 					      dummy_cond, simplify);
1033 
1034       if (!cond)
1035 	return 0;
1036 
1037       if (is_gimple_min_invariant (cond)
1038 	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
1039 	{
1040 	  edge taken_edge;
1041 	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
1042 	    taken_edge = find_edge (e->dest,
1043 				    label_to_block (CASE_LABEL (cond)));
1044 	  else
1045 	    taken_edge = find_taken_edge (e->dest, cond);
1046 
1047 	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);
1048 
1049 	  /* DEST could be NULL for a computed jump to an absolute
1050 	     address.  */
1051 	  if (dest == NULL
1052 	      || dest == e->dest
1053 	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
1054 	      || bitmap_bit_p (visited, dest->index))
1055 	    return 0;
1056 
1057 	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
1058 	     the first edge on the path.  */
1059 	  if (path->length () == 0)
1060 	    {
1061               jump_thread_edge *x
1062 	        = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1063 	      path->safe_push (x);
1064 	    }
1065 
1066 	  jump_thread_edge *x
1067 	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
1068 	  path->safe_push (x);
1069 
1070 	  /* See if we can thread through DEST as well, this helps capture
1071 	     secondary effects of threading without having to re-run DOM or
1072 	     VRP.
1073 
1074 	     We don't want to thread back to a block we have already
1075  	     visited.  This may be overly conservative.  */
1076 	  bitmap_set_bit (visited, dest->index);
1077 	  bitmap_set_bit (visited, e->dest->index);
1078 	  thread_around_empty_blocks (taken_edge,
1079 				      dummy_cond,
1080 				      avail_exprs_stack,
1081 				      simplify,
1082 				      visited,
1083 				      path);
1084 	  return 1;
1085 	}
1086     }
1087   return 0;
1088 }
1089 
1090 /* We are exiting E->src, see if E->dest ends with a conditional
1091    jump which has a known value when reached via E.
1092 
1093    DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1094    to avoid allocating memory.
1095 
1096    CONST_AND_COPIES is used to undo temporary equivalences created during the
1097    walk of E->dest.
1098 
1099    The available expression table is referenced via AVAIL_EXPRS_STACK.
1100 
1101    SIMPLIFY is a pass-specific function used to simplify statements.  */
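/* When E->dest itself cannot be resolved, the code below still tries each
   of its successors as a "joiner" block.  Schematically (hypothetical),
   such a path looks like

       E                EDGE_START_JUMP_THREAD
       successor edge   EDGE_COPY_SRC_JOINER_BLOCK
       ...              further edges found by the recursive walkers

   so E->dest is duplicated as a joiner and the copy's successor edge is
   threaded onward.  */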
1102 
1103 static void
1104 thread_across_edge (gcond *dummy_cond,
1105 		    edge e,
1106 		    class const_and_copies *const_and_copies,
1107 		    class avail_exprs_stack *avail_exprs_stack,
1108 		    pfn_simplify simplify)
1109 {
1110   bitmap visited = BITMAP_ALLOC (NULL);
1111 
1112   const_and_copies->push_marker ();
1113   avail_exprs_stack->push_marker ();
1114 
1115   stmt_count = 0;
1116 
1117   vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1118   bitmap_clear (visited);
1119   bitmap_set_bit (visited, e->src->index);
1120   bitmap_set_bit (visited, e->dest->index);
1121 
1122   int threaded;
1123   if ((e->flags & EDGE_DFS_BACK) == 0)
1124     threaded = thread_through_normal_block (e, dummy_cond,
1125 					    const_and_copies,
1126 					    avail_exprs_stack,
1127 					    simplify, path,
1128 					    visited);
1129   else
1130     threaded = 0;
1131 
1132   if (threaded > 0)
1133     {
1134       propagate_threaded_block_debug_into (path->last ()->e->dest,
1135 					   e->dest);
1136       const_and_copies->pop_to_marker ();
1137       avail_exprs_stack->pop_to_marker ();
1138       BITMAP_FREE (visited);
1139       register_jump_thread (path);
1140       return;
1141     }
1142   else
1143     {
1144       /* Negative and zero return values indicate no threading was possible,
1145 	 thus there should be no edges on the thread path and no need to walk
1146 	 through the vector entries.  */
1147       gcc_assert (path->length () == 0);
1148       path->release ();
1149       delete path;
1150 
1151       /* A negative status indicates the target block was deemed too big to
1152 	 duplicate.  Just quit now rather than trying to use the block as
1153 	 a joiner in a jump threading path.
1154 
1155 	 This prevents unnecessary code growth, but more importantly if we
1156 	 do not look at all the statements in the block, then we may have
1157 	 missed some invalidations if we had traversed a backedge!  */
1158       if (threaded < 0)
1159 	{
1160 	  BITMAP_FREE (visited);
1161 	  const_and_copies->pop_to_marker ();
1162           avail_exprs_stack->pop_to_marker ();
1163 	  return;
1164 	}
1165     }
1166 
1167  /* We were unable to determine which outgoing edge from E->dest is taken.  However,
1168     we might still be able to thread through successors of E->dest.  This
1169     often occurs when E->dest is a joiner block which then fans back out
1170     based on redundant tests.
1171 
1172     If so, we'll copy E->dest and redirect the appropriate predecessor to
1173     the copy.  Within the copy of E->dest, we'll thread one or more edges
1174     to points deeper in the CFG.
1175 
1176     This is a stopgap until we have a more structured approach to path
1177     isolation.  */
1178   {
1179     edge taken_edge;
1180     edge_iterator ei;
1181     bool found;
1182 
1183     /* If E->dest has abnormal outgoing edges, then there's no guarantee
1184        we can safely redirect any of the edges.  Just punt those cases.  */
1185     FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1186       if (taken_edge->flags & EDGE_ABNORMAL)
1187 	{
1188 	  const_and_copies->pop_to_marker ();
1189           avail_exprs_stack->pop_to_marker ();
1190 	  BITMAP_FREE (visited);
1191 	  return;
1192 	}
1193 
1194     /* Look at each successor of E->dest to see if we can thread through it.  */
1195     FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1196       {
1197 	if ((e->flags & EDGE_DFS_BACK) != 0
1198 	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
1199 	  continue;
1200 
1201 	/* Push a fresh marker so we can unwind the equivalences created
1202 	   for each of E->dest's successors.  */
1203 	const_and_copies->push_marker ();
1204 	avail_exprs_stack->push_marker ();
1205 
1206 	/* Avoid threading to any block we have already visited.  */
1207 	bitmap_clear (visited);
1208 	bitmap_set_bit (visited, e->src->index);
1209 	bitmap_set_bit (visited, e->dest->index);
1210 	bitmap_set_bit (visited, taken_edge->dest->index);
1211         vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1212 
1213 	/* Record whether or not we were able to thread through a successor
1214 	   of E->dest.  */
1215         jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1216 	path->safe_push (x);
1217 
1218         x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1219 	path->safe_push (x);
1220 	found = false;
1221 	found = thread_around_empty_blocks (taken_edge,
1222 					    dummy_cond,
1223 					    avail_exprs_stack,
1224 					    simplify,
1225 					    visited,
1226 					    path);
1227 
1228 	if (!found)
1229 	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
1230 					       const_and_copies,
1231 					       avail_exprs_stack,
1232 					       simplify, path,
1233 					       visited) > 0;
1234 
1235 	/* If we were able to thread through a successor of E->dest, then
1236 	   record the jump threading opportunity.  */
1237 	if (found)
1238 	  {
1239 	    propagate_threaded_block_debug_into (path->last ()->e->dest,
1240 						 taken_edge->dest);
1241 	    register_jump_thread (path);
1242 	  }
1243 	else
1244 	  delete_jump_thread_path (path);
1245 
1246 	/* And unwind the equivalence table.  */
1247 	avail_exprs_stack->pop_to_marker ();
1248 	const_and_copies->pop_to_marker ();
1249       }
1250     BITMAP_FREE (visited);
1251   }
1252 
1253   const_and_copies->pop_to_marker ();
1254   avail_exprs_stack->pop_to_marker ();
1255 }
1256 
1257 /* Examine the outgoing edges from BB and conditionally
1258    try to thread them.
1259 
1260    DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1261    to avoid allocating memory.
1262 
1263    CONST_AND_COPIES is used to undo temporary equivalences created while
1264    threading each of BB's outgoing edges.
1265 
1266    The available expression table is referenced via AVAIL_EXPRS_STACK.
1267 
1268    SIMPLIFY is a pass-specific function used to simplify statements.  */
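/* A minimal usage sketch (hypothetical caller, not the exact code of any
   particular pass):

       thread_outgoing_edges (bb, dummy_cond, const_and_copies,
			      avail_exprs_stack,
			      simplify_stmt_for_jump_threading);

   where DUMMY_COND is a pre-allocated scratch GIMPLE_COND and the final
   argument is the pass's own statement simplifier; the names above are
   placeholders for whatever the caller actually uses.  */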
1269 
1270 void
1271 thread_outgoing_edges (basic_block bb, gcond *dummy_cond,
1272 		       class const_and_copies *const_and_copies,
1273 		       class avail_exprs_stack *avail_exprs_stack,
1274 		       tree (*simplify) (gimple *, gimple *,
1275 					 class avail_exprs_stack *,
1276 					 basic_block))
1277 {
1278   int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1279   gimple *last;
1280 
1281   /* If we have an outgoing edge to a block with multiple incoming and
1282      outgoing edges, then we may be able to thread the edge, i.e., we
1283      may be able to statically determine which of the outgoing edges
1284      will be traversed when the incoming edge from BB is traversed.  */
1285   if (single_succ_p (bb)
1286       && (single_succ_edge (bb)->flags & flags) == 0
1287       && potentially_threadable_block (single_succ (bb)))
1288     {
1289       thread_across_edge (dummy_cond, single_succ_edge (bb),
1290 			  const_and_copies, avail_exprs_stack,
1291 			  simplify);
1292     }
1293   else if ((last = last_stmt (bb))
1294 	   && gimple_code (last) == GIMPLE_COND
1295 	   && EDGE_COUNT (bb->succs) == 2
1296 	   && (EDGE_SUCC (bb, 0)->flags & flags) == 0
1297 	   && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
1298     {
1299       edge true_edge, false_edge;
1300 
1301       extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1302 
1303       /* Only try to thread the edge if it reaches a target block with
1304 	 more than one predecessor and more than one successor.  */
1305       if (potentially_threadable_block (true_edge->dest))
1306 	thread_across_edge (dummy_cond, true_edge,
1307 			    const_and_copies, avail_exprs_stack, simplify);
1308 
1309       /* Similarly for the ELSE arm.  */
1310       if (potentially_threadable_block (false_edge->dest))
1311 	thread_across_edge (dummy_cond, false_edge,
1312 			    const_and_copies, avail_exprs_stack, simplify);
1313     }
1314 }
1315